Diffstat (limited to 'lib')
-rw-r--r--lib/ARCMigrate/ARCMT.cpp6
-rw-r--r--lib/ARCMigrate/CMakeLists.txt1
-rw-r--r--lib/ARCMigrate/FileRemapper.cpp2
-rw-r--r--lib/ARCMigrate/ObjCMT.cpp124
-rw-r--r--lib/ARCMigrate/TransAPIUses.cpp2
-rw-r--r--lib/ARCMigrate/TransAutoreleasePool.cpp28
-rw-r--r--lib/ARCMigrate/TransGCAttrs.cpp17
-rw-r--r--lib/ARCMigrate/TransGCCalls.cpp2
-rw-r--r--lib/ARCMigrate/TransProtectedScope.cpp7
-rw-r--r--lib/ARCMigrate/TransRetainReleaseDealloc.cpp22
-rw-r--r--lib/ARCMigrate/TransUnbridgedCasts.cpp19
-rw-r--r--lib/ARCMigrate/TransformActions.cpp2
-rw-r--r--lib/ARCMigrate/Transforms.cpp6
-rw-r--r--lib/AST/APValue.cpp20
-rw-r--r--lib/AST/ASTContext.cpp629
-rw-r--r--lib/AST/ASTDiagnostic.cpp14
-rw-r--r--lib/AST/ASTDumper.cpp1721
-rw-r--r--lib/AST/ASTImporter.cpp8084
-rw-r--r--lib/AST/ASTImporterLookupTable.cpp129
-rw-r--r--lib/AST/ASTStructuralEquivalence.cpp285
-rw-r--r--lib/AST/CMakeLists.txt6
-rw-r--r--lib/AST/CXXInheritance.cpp2
-rw-r--r--lib/AST/CommentLexer.cpp2
-rw-r--r--lib/AST/CommentParser.cpp1
-rw-r--r--lib/AST/CommentSema.cpp7
-rw-r--r--lib/AST/Decl.cpp230
-rw-r--r--lib/AST/DeclBase.cpp135
-rw-r--r--lib/AST/DeclCXX.cpp122
-rw-r--r--lib/AST/DeclObjC.cpp70
-rw-r--r--lib/AST/DeclOpenMP.cpp42
-rw-r--r--lib/AST/DeclPrinter.cpp19
-rw-r--r--lib/AST/DeclTemplate.cpp46
-rw-r--r--lib/AST/DeclarationName.cpp399
-rw-r--r--lib/AST/Expr.cpp669
-rw-r--r--lib/AST/ExprCXX.cpp1017
-rw-r--r--lib/AST/ExprClassification.cpp3
-rw-r--r--lib/AST/ExprConstant.cpp570
-rw-r--r--lib/AST/ExternalASTMerger.cpp10
-rw-r--r--lib/AST/FormatString.cpp (renamed from lib/Analysis/FormatString.cpp)52
-rw-r--r--lib/AST/FormatStringParsing.h (renamed from lib/Analysis/FormatStringParsing.h)6
-rw-r--r--lib/AST/ItaniumMangle.cpp65
-rw-r--r--lib/AST/Linkage.h11
-rw-r--r--lib/AST/Mangle.cpp6
-rw-r--r--lib/AST/MicrosoftMangle.cpp302
-rw-r--r--lib/AST/NSAPI.cpp17
-rw-r--r--lib/AST/NestedNameSpecifier.cpp37
-rw-r--r--lib/AST/ODRHash.cpp264
-rw-r--r--lib/AST/OSLog.cpp (renamed from lib/Analysis/OSLog.cpp)21
-rw-r--r--lib/AST/OpenMPClause.cpp579
-rw-r--r--lib/AST/ParentMap.cpp2
-rw-r--r--lib/AST/PrintfFormatString.cpp (renamed from lib/Analysis/PrintfFormatString.cpp)70
-rw-r--r--lib/AST/RawCommentList.cpp14
-rw-r--r--lib/AST/RecordLayoutBuilder.cpp13
-rw-r--r--lib/AST/ScanfFormatString.cpp (renamed from lib/Analysis/ScanfFormatString.cpp)4
-rw-r--r--lib/AST/SelectorLocationsKind.cpp4
-rw-r--r--lib/AST/Stmt.cpp357
-rw-r--r--lib/AST/StmtCXX.cpp3
-rw-r--r--lib/AST/StmtObjC.cpp8
-rw-r--r--lib/AST/StmtOpenMP.cpp12
-rw-r--r--lib/AST/StmtPrinter.cpp619
-rw-r--r--lib/AST/StmtProfile.cpp23
-rw-r--r--lib/AST/TextNodeDumper.cpp1168
-rw-r--r--lib/AST/Type.cpp344
-rw-r--r--lib/AST/TypeLoc.cpp13
-rw-r--r--lib/AST/TypePrinter.cpp247
-rw-r--r--lib/AST/VTableBuilder.cpp33
-rw-r--r--lib/ASTMatchers/ASTMatchFinder.cpp27
-rw-r--r--lib/ASTMatchers/ASTMatchersInternal.cpp22
-rw-r--r--lib/ASTMatchers/Dynamic/Parser.cpp106
-rw-r--r--lib/ASTMatchers/Dynamic/Registry.cpp47
-rw-r--r--lib/Analysis/AnalysisDeclContext.cpp19
-rw-r--r--lib/Analysis/BodyFarm.cpp60
-rw-r--r--lib/Analysis/CFG.cpp83
-rw-r--r--lib/Analysis/CMakeLists.txt7
-rw-r--r--lib/Analysis/CallGraph.cpp2
-rw-r--r--lib/Analysis/CloneDetection.cpp19
-rw-r--r--lib/Analysis/Consumed.cpp26
-rw-r--r--lib/Analysis/ExprMutationAnalyzer.cpp445
-rw-r--r--lib/Analysis/LiveVariables.cpp61
-rw-r--r--lib/Analysis/ProgramPoint.cpp175
-rw-r--r--lib/Analysis/PseudoConstantAnalysis.cpp226
-rw-r--r--lib/Analysis/ReachableCode.cpp22
-rw-r--r--lib/Analysis/ThreadSafety.cpp410
-rw-r--r--lib/Analysis/ThreadSafetyCommon.cpp59
-rw-r--r--lib/Analysis/ThreadSafetyTIL.cpp18
-rw-r--r--lib/Basic/Attributes.cpp9
-rw-r--r--lib/Basic/Builtins.cpp2
-rw-r--r--lib/Basic/CMakeLists.txt5
-rw-r--r--lib/Basic/CodeGenOptions.cpp (renamed from lib/Frontend/CodeGenOptions.cpp)4
-rw-r--r--lib/Basic/Cuda.cpp27
-rw-r--r--lib/Basic/Diagnostic.cpp15
-rw-r--r--lib/Basic/FileManager.cpp97
-rw-r--r--lib/Basic/FileSystemStatCache.cpp33
-rw-r--r--lib/Basic/FixedPoint.cpp115
-rw-r--r--lib/Basic/IdentifierTable.cpp110
-rw-r--r--lib/Basic/Module.cpp47
-rw-r--r--lib/Basic/OpenMPKinds.cpp46
-rw-r--r--lib/Basic/SourceLocation.cpp55
-rw-r--r--lib/Basic/SourceManager.cpp80
-rw-r--r--lib/Basic/TargetInfo.cpp7
-rw-r--r--lib/Basic/Targets.cpp26
-rw-r--r--lib/Basic/Targets/AArch64.cpp17
-rw-r--r--lib/Basic/Targets/AArch64.h1
-rw-r--r--lib/Basic/Targets/AMDGPU.cpp84
-rw-r--r--lib/Basic/Targets/AMDGPU.h214
-rw-r--r--lib/Basic/Targets/ARC.cpp25
-rw-r--r--lib/Basic/Targets/ARC.h74
-rw-r--r--lib/Basic/Targets/ARM.cpp17
-rw-r--r--lib/Basic/Targets/Hexagon.cpp18
-rw-r--r--lib/Basic/Targets/Mips.cpp65
-rw-r--r--lib/Basic/Targets/Mips.h29
-rw-r--r--lib/Basic/Targets/NVPTX.cpp5
-rw-r--r--lib/Basic/Targets/Nios2.cpp56
-rw-r--r--lib/Basic/Targets/Nios2.h151
-rw-r--r--lib/Basic/Targets/OSTargets.cpp2
-rw-r--r--lib/Basic/Targets/OSTargets.h49
-rw-r--r--lib/Basic/Targets/PPC.cpp34
-rw-r--r--lib/Basic/Targets/PPC.h5
-rw-r--r--lib/Basic/Targets/Sparc.h2
-rw-r--r--lib/Basic/Targets/WebAssembly.cpp49
-rw-r--r--lib/Basic/Targets/WebAssembly.h15
-rw-r--r--lib/Basic/Targets/X86.cpp30
-rw-r--r--lib/Basic/Targets/X86.h13
-rw-r--r--lib/Basic/VirtualFileSystem.cpp2026
-rw-r--r--lib/CodeGen/BackendUtil.cpp129
-rw-r--r--lib/CodeGen/CGAtomic.cpp28
-rw-r--r--lib/CodeGen/CGBlocks.cpp886
-rw-r--r--lib/CodeGen/CGBlocks.h10
-rw-r--r--lib/CodeGen/CGBuiltin.cpp1787
-rw-r--r--lib/CodeGen/CGCUDANV.cpp33
-rw-r--r--lib/CodeGen/CGCXX.cpp4
-rw-r--r--lib/CodeGen/CGCXXABI.cpp2
-rw-r--r--lib/CodeGen/CGCall.cpp128
-rw-r--r--lib/CodeGen/CGCall.h14
-rw-r--r--lib/CodeGen/CGClass.cpp33
-rw-r--r--lib/CodeGen/CGCleanup.cpp4
-rw-r--r--lib/CodeGen/CGCoroutine.cpp14
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp505
-rw-r--r--lib/CodeGen/CGDebugInfo.h64
-rw-r--r--lib/CodeGen/CGDecl.cpp568
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp103
-rw-r--r--lib/CodeGen/CGException.cpp45
-rw-r--r--lib/CodeGen/CGExpr.cpp159
-rw-r--r--lib/CodeGen/CGExprAgg.cpp9
-rw-r--r--lib/CodeGen/CGExprCXX.cpp36
-rw-r--r--lib/CodeGen/CGExprComplex.cpp8
-rw-r--r--lib/CodeGen/CGExprConstant.cpp46
-rw-r--r--lib/CodeGen/CGExprScalar.cpp455
-rw-r--r--lib/CodeGen/CGLoopInfo.cpp129
-rw-r--r--lib/CodeGen/CGLoopInfo.h33
-rw-r--r--lib/CodeGen/CGNonTrivialStruct.cpp31
-rw-r--r--lib/CodeGen/CGObjC.cpp511
-rw-r--r--lib/CodeGen/CGObjCGNU.cpp335
-rw-r--r--lib/CodeGen/CGObjCMac.cpp84
-rw-r--r--lib/CodeGen/CGObjCRuntime.cpp60
-rw-r--r--lib/CodeGen/CGObjCRuntime.h7
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.cpp40
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.h5
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.cpp1079
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.h117
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp2371
-rw-r--r--lib/CodeGen/CGOpenMPRuntimeNVPTX.h99
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp2
-rw-r--r--lib/CodeGen/CGStmt.cpp54
-rw-r--r--lib/CodeGen/CGStmtOpenMP.cpp443
-rw-r--r--lib/CodeGen/CGVTT.cpp6
-rw-r--r--lib/CodeGen/CGVTables.cpp27
-rw-r--r--lib/CodeGen/CGValue.h5
-rw-r--r--lib/CodeGen/CMakeLists.txt1
-rw-r--r--lib/CodeGen/CodeGenABITypes.cpp1
-rw-r--r--lib/CodeGen/CodeGenAction.cpp17
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp325
-rw-r--r--lib/CodeGen/CodeGenFunction.h179
-rw-r--r--lib/CodeGen/CodeGenModule.cpp648
-rw-r--r--lib/CodeGen/CodeGenModule.h50
-rw-r--r--lib/CodeGen/CodeGenPGO.cpp11
-rw-r--r--lib/CodeGen/CodeGenPGO.h1
-rw-r--r--lib/CodeGen/CodeGenTBAA.cpp2
-rw-r--r--lib/CodeGen/CodeGenTypes.cpp3
-rw-r--r--lib/CodeGen/CodeGenTypes.h1
-rw-r--r--lib/CodeGen/ConstantEmitter.h3
-rw-r--r--lib/CodeGen/CoverageMappingGen.cpp107
-rw-r--r--lib/CodeGen/CoverageMappingGen.h1
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp103
-rw-r--r--lib/CodeGen/MacroPPCallbacks.cpp17
-rw-r--r--lib/CodeGen/MacroPPCallbacks.h5
-rw-r--r--lib/CodeGen/MicrosoftCXXABI.cpp22
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp34
-rw-r--r--lib/CodeGen/ObjectFilePCHContainerOperations.cpp5
-rw-r--r--lib/CodeGen/SwiftCallingConv.cpp40
-rw-r--r--lib/CodeGen/TargetInfo.cpp331
-rw-r--r--lib/CodeGen/VarBypassDetector.cpp2
-rw-r--r--lib/CodeGen/VarBypassDetector.h1
-rw-r--r--lib/CrossTU/CrossTranslationUnit.cpp138
-rw-r--r--lib/Driver/Action.cpp16
-rw-r--r--lib/Driver/CMakeLists.txt5
-rw-r--r--lib/Driver/Compilation.cpp2
-rw-r--r--lib/Driver/DarwinSDKInfo.cpp44
-rw-r--r--lib/Driver/Distro.cpp9
-rw-r--r--lib/Driver/Driver.cpp478
-rw-r--r--lib/Driver/Job.cpp19
-rw-r--r--lib/Driver/SanitizerArgs.cpp51
-rw-r--r--lib/Driver/ToolChain.cpp49
-rw-r--r--lib/Driver/ToolChains/AMDGPU.cpp13
-rw-r--r--lib/Driver/ToolChains/AMDGPU.h4
-rw-r--r--lib/Driver/ToolChains/Arch/AArch64.cpp185
-rw-r--r--lib/Driver/ToolChains/Arch/AArch64.h5
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.cpp58
-rw-r--r--lib/Driver/ToolChains/Arch/ARM.h2
-rw-r--r--lib/Driver/ToolChains/Arch/Mips.cpp12
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.cpp10
-rw-r--r--lib/Driver/ToolChains/Arch/PPC.h2
-rw-r--r--lib/Driver/ToolChains/Arch/X86.cpp34
-rw-r--r--lib/Driver/ToolChains/BareMetal.cpp9
-rw-r--r--lib/Driver/ToolChains/Clang.cpp889
-rw-r--r--lib/Driver/ToolChains/CommonArgs.cpp200
-rw-r--r--lib/Driver/ToolChains/CommonArgs.h6
-rw-r--r--lib/Driver/ToolChains/CrossWindows.cpp1
-rw-r--r--lib/Driver/ToolChains/Cuda.cpp122
-rw-r--r--lib/Driver/ToolChains/Cuda.h2
-rw-r--r--lib/Driver/ToolChains/Darwin.cpp322
-rw-r--r--lib/Driver/ToolChains/Darwin.h21
-rw-r--r--lib/Driver/ToolChains/FreeBSD.cpp2
-rw-r--r--lib/Driver/ToolChains/Fuchsia.cpp22
-rw-r--r--lib/Driver/ToolChains/Gnu.cpp199
-rw-r--r--lib/Driver/ToolChains/HIP.cpp59
-rw-r--r--lib/Driver/ToolChains/HIP.h5
-rw-r--r--lib/Driver/ToolChains/Hexagon.cpp17
-rw-r--r--lib/Driver/ToolChains/Hexagon.h3
-rw-r--r--lib/Driver/ToolChains/Hurd.cpp169
-rw-r--r--lib/Driver/ToolChains/Hurd.h46
-rw-r--r--lib/Driver/ToolChains/Linux.cpp147
-rw-r--r--lib/Driver/ToolChains/Linux.h2
-rw-r--r--lib/Driver/ToolChains/MSP430.cpp233
-rw-r--r--lib/Driver/ToolChains/MSP430.h71
-rw-r--r--lib/Driver/ToolChains/MSVC.cpp35
-rw-r--r--lib/Driver/ToolChains/MSVC.h13
-rw-r--r--lib/Driver/ToolChains/MinGW.cpp66
-rw-r--r--lib/Driver/ToolChains/MinGW.h6
-rw-r--r--lib/Driver/ToolChains/Minix.cpp4
-rw-r--r--lib/Driver/ToolChains/MipsLinux.h2
-rw-r--r--lib/Driver/ToolChains/NetBSD.cpp14
-rw-r--r--lib/Driver/ToolChains/NetBSD.h4
-rw-r--r--lib/Driver/ToolChains/OpenBSD.cpp66
-rw-r--r--lib/Driver/ToolChains/OpenBSD.h8
-rw-r--r--lib/Driver/ToolChains/PS4CPU.cpp6
-rw-r--r--lib/Driver/ToolChains/RISCVToolchain.cpp (renamed from lib/Driver/ToolChains/RISCV.cpp)38
-rw-r--r--lib/Driver/ToolChains/RISCVToolchain.h (renamed from lib/Driver/ToolChains/RISCV.h)14
-rw-r--r--lib/Driver/ToolChains/Solaris.h2
-rw-r--r--lib/Driver/ToolChains/WebAssembly.cpp55
-rw-r--r--lib/Driver/ToolChains/WebAssembly.h17
-rw-r--r--lib/Driver/Types.cpp2
-rw-r--r--lib/Driver/XRayArgs.cpp33
-rw-r--r--lib/Edit/RewriteObjCFoundationAPI.cpp20
-rw-r--r--lib/Format/BreakableToken.cpp38
-rw-r--r--lib/Format/ContinuationIndenter.cpp54
-rw-r--r--lib/Format/Format.cpp216
-rw-r--r--lib/Format/FormatToken.h29
-rw-r--r--lib/Format/FormatTokenLexer.cpp11
-rw-r--r--lib/Format/FormatTokenLexer.h4
-rw-r--r--lib/Format/NamespaceEndCommentsFixer.cpp7
-rw-r--r--lib/Format/TokenAnnotator.cpp70
-rw-r--r--lib/Format/TokenAnnotator.h7
-rw-r--r--lib/Format/UnwrappedLineFormatter.cpp23
-rw-r--r--lib/Format/UnwrappedLineParser.cpp53
-rw-r--r--lib/Format/UnwrappedLineParser.h1
-rw-r--r--lib/Format/WhitespaceManager.cpp10
-rw-r--r--lib/Frontend/ASTConsumers.cpp362
-rw-r--r--lib/Frontend/ASTMerge.cpp11
-rw-r--r--lib/Frontend/ASTUnit.cpp52
-rw-r--r--lib/Frontend/CMakeLists.txt3
-rw-r--r--lib/Frontend/CacheTokens.cpp700
-rw-r--r--lib/Frontend/ChainedIncludesSource.cpp1
-rw-r--r--lib/Frontend/CompilerInstance.cpp69
-rw-r--r--lib/Frontend/CompilerInvocation.cpp409
-rw-r--r--lib/Frontend/CreateInvocationFromCommandLine.cpp2
-rw-r--r--lib/Frontend/DependencyFile.cpp38
-rw-r--r--lib/Frontend/DiagnosticRenderer.cpp4
-rw-r--r--lib/Frontend/FrontendAction.cpp67
-rw-r--r--lib/Frontend/FrontendActions.cpp103
-rw-r--r--lib/Frontend/InitHeaderSearch.cpp27
-rw-r--r--lib/Frontend/InitPreprocessor.cpp41
-rw-r--r--lib/Frontend/ModuleDependencyCollector.cpp4
-rw-r--r--lib/Frontend/PrecompiledPreamble.cpp52
-rw-r--r--lib/Frontend/PrintPreprocessedOutput.cpp5
-rw-r--r--lib/Frontend/Rewrite/FrontendActions.cpp7
-rw-r--r--lib/Frontend/Rewrite/InclusionRewriter.cpp1
-rw-r--r--lib/Frontend/Rewrite/RewriteModernObjC.cpp339
-rw-r--r--lib/Frontend/Rewrite/RewriteObjC.cpp262
-rw-r--r--lib/FrontendTool/ExecuteCompilerInvocation.cpp25
-rw-r--r--lib/Headers/CMakeLists.txt6
-rw-r--r--lib/Headers/__clang_cuda_runtime_wrapper.h8
-rw-r--r--lib/Headers/adxintrin.h4
-rw-r--r--lib/Headers/altivec.h123
-rw-r--r--lib/Headers/avx512bwintrin.h839
-rw-r--r--lib/Headers/avx512dqintrin.h302
-rw-r--r--lib/Headers/avx512fintrin.h177
-rw-r--r--lib/Headers/avx512pfintrin.h32
-rw-r--r--lib/Headers/avx512vbmi2intrin.h158
-rw-r--r--lib/Headers/avx512vbmiintrin.h26
-rw-r--r--lib/Headers/avx512vbmivlintrin.h56
-rw-r--r--lib/Headers/avx512vlbwintrin.h75
-rw-r--r--lib/Headers/avx512vlintrin.h349
-rw-r--r--lib/Headers/avx512vlvbmi2intrin.h312
-rw-r--r--lib/Headers/bmiintrin.h10
-rw-r--r--lib/Headers/cuda_wrappers/new6
-rw-r--r--lib/Headers/emmintrin.h107
-rw-r--r--lib/Headers/float.h12
-rw-r--r--lib/Headers/immintrin.h59
-rw-r--r--lib/Headers/intrin.h546
-rw-r--r--lib/Headers/lzcntintrin.h22
-rw-r--r--lib/Headers/opencl-c.h659
-rw-r--r--lib/Headers/unwind.h4
-rw-r--r--lib/Headers/vecintrin.h6
-rw-r--r--lib/Index/CommentToXML.cpp1
-rw-r--r--lib/Index/IndexBody.cpp41
-rw-r--r--lib/Index/IndexDecl.cpp2
-rw-r--r--lib/Index/IndexSymbol.cpp5
-rw-r--r--lib/Index/IndexTypeSourceInfo.cpp14
-rw-r--r--lib/Index/IndexingAction.cpp24
-rw-r--r--lib/Index/IndexingContext.cpp26
-rw-r--r--lib/Index/SimpleFormatContext.h10
-rw-r--r--lib/Index/USRGeneration.cpp54
-rw-r--r--lib/Lex/CMakeLists.txt1
-rw-r--r--lib/Lex/HeaderMap.cpp5
-rw-r--r--lib/Lex/HeaderSearch.cpp49
-rw-r--r--lib/Lex/Lexer.cpp78
-rw-r--r--lib/Lex/LiteralSupport.cpp2
-rw-r--r--lib/Lex/MacroInfo.cpp3
-rw-r--r--lib/Lex/ModuleMap.cpp66
-rw-r--r--lib/Lex/PPDirectives.cpp201
-rw-r--r--lib/Lex/PPLexerChange.cpp109
-rw-r--r--lib/Lex/PPMacroExpansion.cpp13
-rw-r--r--lib/Lex/PTHLexer.cpp748
-rw-r--r--lib/Lex/Pragma.cpp57
-rw-r--r--lib/Lex/Preprocessor.cpp68
-rw-r--r--lib/Lex/TokenConcatenation.cpp4
-rw-r--r--lib/Parse/ParseAST.cpp34
-rw-r--r--lib/Parse/ParseCXXInlineMethods.cpp10
-rw-r--r--lib/Parse/ParseDecl.cpp124
-rw-r--r--lib/Parse/ParseDeclCXX.cpp200
-rw-r--r--lib/Parse/ParseExpr.cpp86
-rw-r--r--lib/Parse/ParseExprCXX.cpp111
-rw-r--r--lib/Parse/ParseInit.cpp6
-rw-r--r--lib/Parse/ParseObjc.cpp41
-rw-r--r--lib/Parse/ParseOpenMP.cpp390
-rw-r--r--lib/Parse/ParsePragma.cpp273
-rw-r--r--lib/Parse/ParseStmt.cpp151
-rw-r--r--lib/Parse/ParseTemplate.cpp77
-rw-r--r--lib/Parse/ParseTentative.cpp105
-rw-r--r--lib/Parse/Parser.cpp23
-rw-r--r--lib/Rewrite/HTMLRewrite.cpp2
-rw-r--r--lib/Rewrite/RewriteRope.cpp4
-rw-r--r--lib/Sema/AnalysisBasedWarnings.cpp135
-rw-r--r--lib/Sema/CMakeLists.txt1
-rw-r--r--lib/Sema/CodeCompleteConsumer.cpp222
-rw-r--r--lib/Sema/DeclSpec.cpp63
-rw-r--r--lib/Sema/IdentifierResolver.cpp14
-rw-r--r--lib/Sema/JumpDiagnostics.cpp16
-rw-r--r--lib/Sema/ParsedAttr.cpp49
-rw-r--r--lib/Sema/ScopeInfo.cpp2
-rw-r--r--lib/Sema/Sema.cpp135
-rw-r--r--lib/Sema/SemaAccess.cpp47
-rw-r--r--lib/Sema/SemaAttr.cpp112
-rw-r--r--lib/Sema/SemaCUDA.cpp2
-rw-r--r--lib/Sema/SemaCXXScopeSpec.cpp24
-rw-r--r--lib/Sema/SemaCast.cpp105
-rw-r--r--lib/Sema/SemaChecking.cpp2758
-rw-r--r--lib/Sema/SemaCodeComplete.cpp3022
-rw-r--r--lib/Sema/SemaCoroutine.cpp43
-rw-r--r--lib/Sema/SemaDecl.cpp959
-rw-r--r--lib/Sema/SemaDeclAttr.cpp1214
-rw-r--r--lib/Sema/SemaDeclCXX.cpp814
-rw-r--r--lib/Sema/SemaDeclObjC.cpp52
-rw-r--r--lib/Sema/SemaExceptionSpec.cpp54
-rw-r--r--lib/Sema/SemaExpr.cpp1157
-rw-r--r--lib/Sema/SemaExprCXX.cpp389
-rw-r--r--lib/Sema/SemaExprMember.cpp30
-rw-r--r--lib/Sema/SemaExprObjC.cpp240
-rw-r--r--lib/Sema/SemaInit.cpp710
-rw-r--r--lib/Sema/SemaLambda.cpp157
-rw-r--r--lib/Sema/SemaLookup.cpp161
-rw-r--r--lib/Sema/SemaObjCProperty.cpp42
-rw-r--r--lib/Sema/SemaOpenMP.cpp1749
-rw-r--r--lib/Sema/SemaOverload.cpp628
-rw-r--r--lib/Sema/SemaPseudoObject.cpp2
-rw-r--r--lib/Sema/SemaStmt.cpp429
-rw-r--r--lib/Sema/SemaStmtAsm.cpp160
-rw-r--r--lib/Sema/SemaStmtAttr.cpp99
-rw-r--r--lib/Sema/SemaTemplate.cpp396
-rw-r--r--lib/Sema/SemaTemplateDeduction.cpp19
-rw-r--r--lib/Sema/SemaTemplateInstantiate.cpp53
-rw-r--r--lib/Sema/SemaTemplateInstantiateDecl.cpp352
-rw-r--r--lib/Sema/SemaTemplateVariadic.cpp9
-rw-r--r--lib/Sema/SemaType.cpp1013
-rw-r--r--lib/Sema/TreeTransform.h703
-rw-r--r--lib/Serialization/ASTCommon.cpp6
-rw-r--r--lib/Serialization/ASTReader.cpp1024
-rw-r--r--lib/Serialization/ASTReaderDecl.cpp300
-rw-r--r--lib/Serialization/ASTReaderStmt.cpp1294
-rw-r--r--lib/Serialization/ASTWriter.cpp555
-rw-r--r--lib/Serialization/ASTWriterDecl.cpp110
-rw-r--r--lib/Serialization/ASTWriterStmt.cpp675
-rw-r--r--lib/Serialization/CMakeLists.txt1
-rw-r--r--lib/Serialization/GlobalModuleIndex.cpp2
-rw-r--r--lib/Serialization/ModuleManager.cpp19
-rw-r--r--lib/Serialization/PCHContainerOperations.cpp (renamed from lib/Frontend/PCHContainerOperations.cpp)4
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp24
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h31
-rw-r--r--lib/StaticAnalyzer/Checkers/AllocationState.h5
-rw-r--r--lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp27
-rw-r--r--lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp102
-rw-r--r--lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/CMakeLists.txt14
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringChecker.cpp150
-rw-r--r--lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp46
-rw-r--r--lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp103
-rw-r--r--lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp22
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp31
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/ChrootChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ClangCheckers.cpp32
-rw-r--r--lib/StaticAnalyzer/Checkers/ClangSACheckers.h37
-rw-r--r--lib/StaticAnalyzer/Checkers/CloneChecker.cpp16
-rw-r--r--lib/StaticAnalyzer/Checkers/ConversionChecker.cpp57
-rw-r--r--lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/DebugCheckers.cpp25
-rw-r--r--lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp15
-rw-r--r--lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp128
-rw-r--r--lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp115
-rw-r--r--lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/GTestChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp216
-rw-r--r--lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp158
-rw-r--r--lib/StaticAnalyzer/Checkers/InterCheckerAPI.h3
-rw-r--r--lib/StaticAnalyzer/Checkers/IteratorChecker.cpp1414
-rw-r--r--lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h1
-rw-r--r--lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp5
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp53
-rw-r--r--lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocChecker.cpp161
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp525
-rw-r--r--lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/MoveChecker.cpp740
-rw-r--r--lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp42
-rw-r--r--lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp19
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/PaddingChecker.cpp45
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp4156
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp1547
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h393
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp794
-rw-r--r--lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h85
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp36
-rw-r--r--lib/StaticAnalyzer/Checkers/SelectorExtras.h46
-rw-r--r--lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp8
-rw-r--r--lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/StreamChecker.cpp36
-rw-r--r--lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/TraversalChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp195
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp11
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h349
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp538
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp282
-rw-r--r--lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp688
-rw-r--r--lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp6
-rw-r--r--lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp4
-rw-r--r--lib/StaticAnalyzer/Checkers/ValistChecker.cpp7
-rw-r--r--lib/StaticAnalyzer/Checkers/VforkChecker.cpp2
-rw-r--r--lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/AnalysisManager.cpp49
-rw-r--r--lib/StaticAnalyzer/Core/AnalyzerOptions.cpp483
-rw-r--r--lib/StaticAnalyzer/Core/BasicValueFactory.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/BugReporter.cpp141
-rw-r--r--lib/StaticAnalyzer/Core/BugReporterVisitors.cpp750
-rw-r--r--lib/StaticAnalyzer/Core/CMakeLists.txt9
-rw-r--r--lib/StaticAnalyzer/Core/CallEvent.cpp162
-rw-r--r--lib/StaticAnalyzer/Core/Checker.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/CheckerHelpers.cpp8
-rw-r--r--lib/StaticAnalyzer/Core/CheckerManager.cpp9
-rw-r--r--lib/StaticAnalyzer/Core/CheckerRegistry.cpp190
-rw-r--r--lib/StaticAnalyzer/Core/CommonBugCategories.cpp4
-rw-r--r--lib/StaticAnalyzer/Core/CoreEngine.cpp32
-rw-r--r--lib/StaticAnalyzer/Core/DynamicTypeMap.cpp5
-rw-r--r--lib/StaticAnalyzer/Core/Environment.cpp22
-rw-r--r--lib/StaticAnalyzer/Core/ExplodedGraph.cpp30
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngine.cpp545
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineC.cpp18
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCXX.cpp125
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp93
-rw-r--r--lib/StaticAnalyzer/Core/ExprEngineObjC.cpp18
-rw-r--r--lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp63
-rw-r--r--lib/StaticAnalyzer/Core/LoopWidening.cpp11
-rw-r--r--lib/StaticAnalyzer/Core/MemRegion.cpp81
-rw-r--r--lib/StaticAnalyzer/Core/PathDiagnostic.cpp44
-rw-r--r--lib/StaticAnalyzer/Core/PlistDiagnostics.cpp892
-rw-r--r--lib/StaticAnalyzer/Core/ProgramState.cpp99
-rw-r--r--lib/StaticAnalyzer/Core/RangeConstraintManager.cpp2
-rw-r--r--lib/StaticAnalyzer/Core/RangedConstraintManager.cpp5
-rw-r--r--lib/StaticAnalyzer/Core/RegionStore.cpp175
-rw-r--r--lib/StaticAnalyzer/Core/RetainSummaryManager.cpp1229
-rw-r--r--lib/StaticAnalyzer/Core/SMTConstraintManager.cpp181
-rw-r--r--lib/StaticAnalyzer/Core/SValBuilder.cpp15
-rw-r--r--lib/StaticAnalyzer/Core/SVals.cpp6
-rw-r--r--lib/StaticAnalyzer/Core/SarifDiagnostics.cpp349
-rw-r--r--lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp26
-rw-r--r--lib/StaticAnalyzer/Core/Store.cpp45
-rw-r--r--lib/StaticAnalyzer/Core/SymbolManager.cpp17
-rw-r--r--lib/StaticAnalyzer/Core/TaintManager.cpp23
-rw-r--r--lib/StaticAnalyzer/Core/WorkList.cpp62
-rw-r--r--lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp304
-rw-r--r--lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp156
-rw-r--r--lib/StaticAnalyzer/Frontend/CMakeLists.txt1
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp196
-rw-r--r--lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp247
-rw-r--r--lib/StaticAnalyzer/Frontend/ModelInjector.cpp2
-rw-r--r--lib/StaticAnalyzer/README.txt12
-rw-r--r--lib/Tooling/ASTDiff/ASTDiff.cpp7
-rw-r--r--lib/Tooling/AllTUsExecution.cpp32
-rw-r--r--lib/Tooling/CMakeLists.txt1
-rw-r--r--lib/Tooling/CompilationDatabase.cpp39
-rw-r--r--lib/Tooling/Core/Diagnostic.cpp9
-rw-r--r--lib/Tooling/Core/Lookup.cpp44
-rw-r--r--lib/Tooling/Core/Replacement.cpp17
-rw-r--r--lib/Tooling/Execution.cpp2
-rw-r--r--lib/Tooling/Inclusions/HeaderIncludes.cpp12
-rw-r--r--lib/Tooling/InterpolatingCompilationDatabase.cpp277
-rw-r--r--lib/Tooling/JSONCompilationDatabase.cpp5
-rw-r--r--lib/Tooling/Refactoring/ASTSelection.cpp2
-rw-r--r--lib/Tooling/Refactoring/Extract/Extract.cpp6
-rw-r--r--lib/Tooling/Refactoring/Rename/USRFinder.cpp4
-rw-r--r--lib/Tooling/Refactoring/Rename/USRLocFinder.cpp8
-rw-r--r--lib/Tooling/StandaloneExecution.cpp2
-rw-r--r--lib/Tooling/Tooling.cpp123
583 files changed, 55835 insertions, 38806 deletions
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index 3c7b593be6d4..6da87903a488 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -190,8 +190,6 @@ createInvocationForMigration(CompilerInvocation &origCI,
PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile);
PPOpts.ImplicitPCHInclude.clear();
}
- // FIXME: Get the original header of a PTH as well.
- CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
std::string define = getARCMTMacroName();
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
@@ -241,7 +239,7 @@ bool arcmt::checkForManualIssues(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, bool emitPremigrationARCErrors,
StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC1)
+ if (!origCI.getLangOpts()->ObjC)
return false;
LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
@@ -342,7 +340,7 @@ applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, StringRef outputDir,
bool emitPremigrationARCErrors, StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC1)
+ if (!origCI.getLangOpts()->ObjC)
return false;
LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
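
A note on the two ARCMT.cpp hunks above: they track a LangOptions rename, with the old ObjC1 flag now spelled ObjC. A minimal sketch of the new-style guard, mirroring the calls in this file (the helper name is illustrative, not part of the patch):

#include "clang/Frontend/CompilerInvocation.h"

// Illustrative only: Objective-C-only migration passes bail out early when
// the invocation is not compiling Objective-C at all.
static bool wantsObjCMigration(clang::CompilerInvocation &CI) {
  return CI.getLangOpts()->ObjC; // formerly CI.getLangOpts()->ObjC1
}
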
diff --git a/lib/ARCMigrate/CMakeLists.txt b/lib/ARCMigrate/CMakeLists.txt
index b716a20fe63f..efdff279aaed 100644
--- a/lib/ARCMigrate/CMakeLists.txt
+++ b/lib/ARCMigrate/CMakeLists.txt
@@ -35,4 +35,5 @@ add_clang_library(clangARCMigrate
clangSema
clangSerialization
clangStaticAnalyzerCheckers
+ clangStaticAnalyzerCore
)
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index ccc8c9ee30ff..225f47119b00 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -226,7 +226,7 @@ void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
const FileEntry *file = FileMgr->getFile(filePath);
- // If we are updating a file that overriden an original file,
+ // If we are updating a file that overridden an original file,
// actually update the original file.
llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
I = ToFromMappings.find(file);
diff --git a/lib/ARCMigrate/ObjCMT.cpp b/lib/ARCMigrate/ObjCMT.cpp
index 7e9bc6a5bdc6..6950ce0e12f3 100644
--- a/lib/ARCMigrate/ObjCMT.cpp
+++ b/lib/ARCMigrate/ObjCMT.cpp
@@ -27,7 +27,7 @@
#include "clang/Lex/PPConditionalDirectiveRecord.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
+#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Path.h"
@@ -36,7 +36,7 @@
using namespace clang;
using namespace arcmt;
-using namespace ento::objc_retain;
+using namespace ento;
namespace {
@@ -261,7 +261,7 @@ namespace {
if (IsGetter) {
// Find space location range between receiver expression and getter method.
SourceLocation BegLoc =
- ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getEndLoc();
BegLoc = PP.getLocForEndOfToken(BegLoc);
SourceLocation EndLoc = Msg->getSelectorLoc(0);
SourceRange SpaceRange(BegLoc, EndLoc);
@@ -269,7 +269,7 @@ namespace {
// rewrite getter method expression into: receiver.property or
// (receiver).property
if (NeedsParen) {
- commit.insertBefore(receiver->getLocStart(), "(");
+ commit.insertBefore(receiver->getBeginLoc(), "(");
PropertyDotString = ").";
}
else
@@ -291,9 +291,9 @@ namespace {
if (!RHS)
return false;
SourceLocation BegLoc =
- ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd();
+ ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getEndLoc();
BegLoc = PP.getLocForEndOfToken(BegLoc);
- SourceLocation EndLoc = RHS->getLocStart();
+ SourceLocation EndLoc = RHS->getBeginLoc();
EndLoc = EndLoc.getLocWithOffset(-1);
const char *colon = PP.getSourceManager().getCharacterData(EndLoc);
// Add a space after '=' if there is no space between RHS and '='
@@ -545,14 +545,14 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
SourceLocation EndGetterSelectorLoc =
StartGetterSelectorLoc.getLocWithOffset(GetterSelector.getNameForSlot(0).size());
- commit.replace(CharSourceRange::getCharRange(Getter->getLocStart(),
+ commit.replace(CharSourceRange::getCharRange(Getter->getBeginLoc(),
EndGetterSelectorLoc),
PropertyString);
if (Setter && AvailabilityArgsMatch) {
SourceLocation EndLoc = Setter->getDeclaratorEndLoc();
// Get location past ';'
EndLoc = EndLoc.getLocWithOffset(1);
- SourceLocation BeginOfSetterDclLoc = Setter->getLocStart();
+ SourceLocation BeginOfSetterDclLoc = Setter->getBeginLoc();
// FIXME. This assumes that setter decl; is immediately preceded by eoln.
// It is trying to remove the setter method decl. line entirely.
BeginOfSetterDclLoc = BeginOfSetterDclLoc.getLocWithOffset(-1);
@@ -720,32 +720,33 @@ static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl,
ClassString += TypedefDcl->getIdentifier()->getName();
ClassString += ')';
- SourceRange R(EnumDcl->getLocStart(), EnumDcl->getLocStart());
+ SourceRange R(EnumDcl->getBeginLoc(), EnumDcl->getBeginLoc());
commit.replace(R, ClassString);
- SourceLocation EndOfEnumDclLoc = EnumDcl->getLocEnd();
+ SourceLocation EndOfEnumDclLoc = EnumDcl->getEndLoc();
EndOfEnumDclLoc = trans::findSemiAfterLocation(EndOfEnumDclLoc,
NS.getASTContext(), /*IsDecl*/true);
if (EndOfEnumDclLoc.isValid()) {
- SourceRange EnumDclRange(EnumDcl->getLocStart(), EndOfEnumDclLoc);
- commit.insertFromRange(TypedefDcl->getLocStart(), EnumDclRange);
+ SourceRange EnumDclRange(EnumDcl->getBeginLoc(), EndOfEnumDclLoc);
+ commit.insertFromRange(TypedefDcl->getBeginLoc(), EnumDclRange);
}
else
return false;
- SourceLocation EndTypedefDclLoc = TypedefDcl->getLocEnd();
+ SourceLocation EndTypedefDclLoc = TypedefDcl->getEndLoc();
EndTypedefDclLoc = trans::findSemiAfterLocation(EndTypedefDclLoc,
NS.getASTContext(), /*IsDecl*/true);
if (EndTypedefDclLoc.isValid()) {
- SourceRange TDRange(TypedefDcl->getLocStart(), EndTypedefDclLoc);
+ SourceRange TDRange(TypedefDcl->getBeginLoc(), EndTypedefDclLoc);
commit.remove(TDRange);
}
else
return false;
- EndOfEnumDclLoc = trans::findLocationAfterSemi(EnumDcl->getLocEnd(), NS.getASTContext(),
- /*IsDecl*/true);
+ EndOfEnumDclLoc =
+ trans::findLocationAfterSemi(EnumDcl->getEndLoc(), NS.getASTContext(),
+ /*IsDecl*/ true);
if (EndOfEnumDclLoc.isValid()) {
- SourceLocation BeginOfEnumDclLoc = EnumDcl->getLocStart();
+ SourceLocation BeginOfEnumDclLoc = EnumDcl->getBeginLoc();
// FIXME. This assumes that enum decl; is immediately preceded by eoln.
// It is trying to remove the enum decl. lines entirely.
BeginOfEnumDclLoc = BeginOfEnumDclLoc.getLocWithOffset(-1);
@@ -775,12 +776,13 @@ static void rewriteToNSMacroDecl(ASTContext &Ctx,
SourceLocation EndLoc = EnumDcl->getBraceRange().getBegin();
if (EndLoc.isInvalid())
return;
- CharSourceRange R = CharSourceRange::getCharRange(EnumDcl->getLocStart(), EndLoc);
+ CharSourceRange R =
+ CharSourceRange::getCharRange(EnumDcl->getBeginLoc(), EndLoc);
commit.replace(R, ClassString);
// This is to remove spaces between '}' and typedef name.
- SourceLocation StartTypedefLoc = EnumDcl->getLocEnd();
+ SourceLocation StartTypedefLoc = EnumDcl->getEndLoc();
StartTypedefLoc = StartTypedefLoc.getLocWithOffset(+1);
- SourceLocation EndTypedefLoc = TypedefDcl->getLocEnd();
+ SourceLocation EndTypedefLoc = TypedefDcl->getEndLoc();
commit.remove(SourceRange(StartTypedefLoc, EndTypedefLoc));
}
@@ -811,7 +813,7 @@ static bool UseNSOptionsMacro(Preprocessor &PP, ASTContext &Ctx,
}
if (AllHexdecimalEnumerator && EnumVal) {
bool FoundHexdecimalEnumerator = false;
- SourceLocation EndLoc = Enumerator->getLocEnd();
+ SourceLocation EndLoc = Enumerator->getEndLoc();
Token Tok;
if (!PP.getRawToken(EndLoc, Tok, /*IgnoreWhiteSpace=*/true))
if (Tok.isLiteral() && Tok.getLength() > 2) {
@@ -928,7 +930,7 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
if (const EnumType *EnumTy = qt->getAs<EnumType>()) {
if (EnumTy->getDecl() == EnumDcl) {
bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
- if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
+ if (!InsertFoundation(Ctx, TypedefDcl->getBeginLoc()))
return false;
edit::Commit commit(*Editor);
rewriteToNSMacroDecl(Ctx, EnumDcl, TypedefDcl, *NSAPIObj, commit, !NSOptions);
@@ -941,7 +943,7 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
// We may still use NS_OPTIONS based on what we find in the enumertor list.
bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl);
- if (!InsertFoundation(Ctx, TypedefDcl->getLocStart()))
+ if (!InsertFoundation(Ctx, TypedefDcl->getBeginLoc()))
return false;
edit::Commit commit(*Editor);
bool Res = rewriteToNSEnumDecl(EnumDcl, TypedefDcl, *NSAPIObj,
@@ -964,7 +966,7 @@ static void ReplaceWithInstancetype(ASTContext &Ctx,
ClassString = "instancetype";
}
else {
- R = SourceRange(OM->getLocStart(), OM->getLocStart());
+ R = SourceRange(OM->getBeginLoc(), OM->getBeginLoc());
ClassString = OM->isInstanceMethod() ? '-' : '+';
ClassString += " (instancetype)";
}
@@ -986,7 +988,7 @@ static void ReplaceWithClasstype(const ObjCMigrateASTConsumer &ASTC,
}
}
else {
- R = SourceRange(OM->getLocStart(), OM->getLocStart());
+ R = SourceRange(OM->getBeginLoc(), OM->getBeginLoc());
ClassString = "+ (";
ClassString += IDecl->getName(); ClassString += "*)";
}
@@ -1257,7 +1259,7 @@ void ObjCMigrateASTConsumer::migrateNsReturnsInnerPointer(ASTContext &Ctx,
return;
edit::Commit commit(*Editor);
- commit.insertBefore(OM->getLocEnd(), " NS_RETURNS_INNER_POINTER");
+ commit.insertBefore(OM->getEndLoc(), " NS_RETURNS_INNER_POINTER");
Editor->commit(commit);
}
@@ -1269,7 +1271,7 @@ void ObjCMigrateASTConsumer::migratePropertyNsReturnsInnerPointer(ASTContext &Ct
!NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER"))
return;
edit::Commit commit(*Editor);
- commit.insertBefore(P->getLocEnd(), " NS_RETURNS_INNER_POINTER ");
+ commit.insertBefore(P->getEndLoc(), " NS_RETURNS_INNER_POINTER ");
Editor->commit(commit);
}
@@ -1395,9 +1397,9 @@ void ObjCMigrateASTConsumer::AnnotateImplicitBridging(ASTContext &Ctx) {
CFFunctionIBCandidates[CFFunctionIBCandidates.size()-1];
const char *PragmaString = "\nCF_IMPLICIT_BRIDGING_ENABLED\n\n";
edit::Commit commit(*Editor);
- commit.insertBefore(FirstFD->getLocStart(), PragmaString);
+ commit.insertBefore(FirstFD->getBeginLoc(), PragmaString);
PragmaString = "\n\nCF_IMPLICIT_BRIDGING_DISABLED\n";
- SourceLocation EndLoc = LastFD->getLocEnd();
+ SourceLocation EndLoc = LastFD->getEndLoc();
// get location just past end of function location.
EndLoc = PP.getLocForEndOfToken(EndLoc);
if (isa<FunctionDecl>(LastFD)) {
@@ -1458,21 +1460,21 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
if (!ResultAnnotated) {
RetEffect Ret = CE.getReturnValue();
const char *AnnotationString = nullptr;
- if (Ret.getObjKind() == RetEffect::CF) {
+ if (Ret.getObjKind() == ObjKind::CF) {
if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
AnnotationString = " CF_RETURNS_RETAINED";
else if (Ret.notOwned() &&
NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
AnnotationString = " CF_RETURNS_NOT_RETAINED";
}
- else if (Ret.getObjKind() == RetEffect::ObjC) {
+ else if (Ret.getObjKind() == ObjKind::ObjC) {
if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED"))
AnnotationString = " NS_RETURNS_RETAINED";
}
if (AnnotationString) {
edit::Commit commit(*Editor);
- commit.insertAfterToken(FuncDecl->getLocEnd(), AnnotationString);
+ commit.insertAfterToken(FuncDecl->getEndLoc(), AnnotationString);
Editor->commit(commit);
}
}
@@ -1482,14 +1484,15 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
ArgEffect AE = AEArgs[i];
- if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
+ if (AE.getKind() == DecRef && AE.getObjKind() == ObjKind::CF &&
+ !pd->hasAttr<CFConsumedAttr>() &&
NSAPIObj->isMacroDefined("CF_CONSUMED")) {
edit::Commit commit(*Editor);
commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
Editor->commit(commit);
- }
- else if (AE == DecRefMsg && !pd->hasAttr<NSConsumedAttr>() &&
- NSAPIObj->isMacroDefined("NS_CONSUMED")) {
+ } else if (AE.getKind() == DecRef && AE.getObjKind() == ObjKind::ObjC &&
+ !pd->hasAttr<NSConsumedAttr>() &&
+ NSAPIObj->isMacroDefined("NS_CONSUMED")) {
edit::Commit commit(*Editor);
commit.insertBefore(pd->getLocation(), "NS_CONSUMED ");
Editor->commit(commit);
@@ -1518,7 +1521,7 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
bool ReturnCFAudited = false;
if (!FuncIsReturnAnnotated) {
RetEffect Ret = CE.getReturnValue();
- if (Ret.getObjKind() == RetEffect::CF &&
+ if (Ret.getObjKind() == ObjKind::CF &&
(Ret.isOwned() || Ret.notOwned()))
ReturnCFAudited = true;
else if (!AuditedType(FuncDecl->getReturnType()))
@@ -1534,13 +1537,13 @@ ObjCMigrateASTConsumer::CF_BRIDGING_KIND
pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
ArgEffect AE = AEArgs[i];
- if (AE == DecRef /*CFConsumed annotated*/ || AE == IncRef) {
- if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>())
+ if ((AE.getKind() == DecRef /*CFConsumed annotated*/ ||
+ AE.getKind() == IncRef) && AE.getObjKind() == ObjKind::CF) {
+ if (AE.getKind() == DecRef && !pd->hasAttr<CFConsumedAttr>())
ArgCFAudited = true;
- else if (AE == IncRef)
+ else if (AE.getKind() == IncRef)
ArgCFAudited = true;
- }
- else {
+ } else {
QualType AT = pd->getType();
if (!AuditedType(AT)) {
AddCFAnnotations(Ctx, CE, FuncDecl, FuncIsReturnAnnotated);
@@ -1572,14 +1575,14 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
if (!ResultAnnotated) {
RetEffect Ret = CE.getReturnValue();
const char *AnnotationString = nullptr;
- if (Ret.getObjKind() == RetEffect::CF) {
+ if (Ret.getObjKind() == ObjKind::CF) {
if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
AnnotationString = " CF_RETURNS_RETAINED";
else if (Ret.notOwned() &&
NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
AnnotationString = " CF_RETURNS_NOT_RETAINED";
}
- else if (Ret.getObjKind() == RetEffect::ObjC) {
+ else if (Ret.getObjKind() == ObjKind::ObjC) {
ObjCMethodFamily OMF = MethodDecl->getMethodFamily();
switch (OMF) {
case clang::OMF_alloc:
@@ -1598,7 +1601,7 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
if (AnnotationString) {
edit::Commit commit(*Editor);
- commit.insertBefore(MethodDecl->getLocEnd(), AnnotationString);
+ commit.insertBefore(MethodDecl->getEndLoc(), AnnotationString);
Editor->commit(commit);
}
}
@@ -1608,7 +1611,9 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
ArgEffect AE = AEArgs[i];
- if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
+ if (AE.getKind() == DecRef
+ && AE.getObjKind() == ObjKind::CF
+ && !pd->hasAttr<CFConsumedAttr>() &&
NSAPIObj->isMacroDefined("CF_CONSUMED")) {
edit::Commit commit(*Editor);
commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
@@ -1624,19 +1629,20 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
return;
CallEffects CE = CallEffects::getEffect(MethodDecl);
- bool MethodIsReturnAnnotated = (MethodDecl->hasAttr<CFReturnsRetainedAttr>() ||
- MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
- MethodDecl->hasAttr<NSReturnsRetainedAttr>() ||
- MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
- MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
-
- if (CE.getReceiver() == DecRefMsg &&
+ bool MethodIsReturnAnnotated =
+ (MethodDecl->hasAttr<CFReturnsRetainedAttr>() ||
+ MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
+ MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
+
+ if (CE.getReceiver().getKind() == DecRef &&
!MethodDecl->hasAttr<NSConsumesSelfAttr>() &&
MethodDecl->getMethodFamily() != OMF_init &&
MethodDecl->getMethodFamily() != OMF_release &&
NSAPIObj->isMacroDefined("NS_CONSUMES_SELF")) {
edit::Commit commit(*Editor);
- commit.insertBefore(MethodDecl->getLocEnd(), " NS_CONSUMES_SELF");
+ commit.insertBefore(MethodDecl->getEndLoc(), " NS_CONSUMES_SELF");
Editor->commit(commit);
}
@@ -1647,8 +1653,8 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
if (!MethodIsReturnAnnotated) {
RetEffect Ret = CE.getReturnValue();
- if ((Ret.getObjKind() == RetEffect::CF ||
- Ret.getObjKind() == RetEffect::ObjC) &&
+ if ((Ret.getObjKind() == ObjKind::CF ||
+ Ret.getObjKind() == ObjKind::ObjC) &&
(Ret.isOwned() || Ret.notOwned())) {
AddCFAnnotations(Ctx, CE, MethodDecl, false);
return;
@@ -1664,8 +1670,8 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) {
const ParmVarDecl *pd = *pi;
ArgEffect AE = AEArgs[i];
- if ((AE == DecRef && !pd->hasAttr<CFConsumedAttr>()) || AE == IncRef ||
- !AuditedType(pd->getType())) {
+ if ((AE.getKind() == DecRef && !pd->hasAttr<CFConsumedAttr>()) ||
+ AE.getKind() == IncRef || !AuditedType(pd->getType())) {
AddCFAnnotations(Ctx, CE, MethodDecl, MethodIsReturnAnnotated);
return;
}
@@ -1713,7 +1719,7 @@ void ObjCMigrateASTConsumer::inferDesignatedInitializers(
continue;
if (hasSuperInitCall(MD)) {
edit::Commit commit(*Editor);
- commit.insert(IFaceM->getLocEnd(), " NS_DESIGNATED_INITIALIZER");
+ commit.insert(IFaceM->getEndLoc(), " NS_DESIGNATED_INITIALIZER");
Editor->commit(commit);
}
}
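
The AddCFAnnotations and migrateAddMethodAnnotation hunks above follow the retain-count summary API into clang/StaticAnalyzer/Core/RetainSummaryManager.h: return kinds are compared through ObjKind rather than nested RetEffect enumerators, and an ArgEffect now carries both an effect kind and an object kind. A minimal sketch of the new-style test (the helper name is illustrative, not from the patch):

#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"

using namespace clang;
using namespace ento;

// Illustrative only: "this parameter consumes a CF object" in the renamed
// API, matching the CF_CONSUMED check performed by the migrator above.
static bool consumesCFArgument(ArgEffect AE) {
  return AE.getKind() == DecRef && AE.getObjKind() == ObjKind::CF;
}
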
diff --git a/lib/ARCMigrate/TransAPIUses.cpp b/lib/ARCMigrate/TransAPIUses.cpp
index 40c8a070f813..6146e07b1d2a 100644
--- a/lib/ARCMigrate/TransAPIUses.cpp
+++ b/lib/ARCMigrate/TransAPIUses.cpp
@@ -75,7 +75,7 @@ public:
return true;
if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone)
- Pass.TA.report(parm->getLocStart(),
+ Pass.TA.report(parm->getBeginLoc(),
diag::err_arcmt_nsinvocation_ownership,
parm->getSourceRange())
<< selName;
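
Most of the remaining churn in these ARCMigrate files is the tree-wide source-location rename: getLocStart()/getLocEnd() become getBeginLoc()/getEndLoc(). A minimal sketch of the new spellings (the helper is illustrative, not part of the patch):

#include "clang/AST/Stmt.h"
#include "clang/Basic/SourceLocation.h"

// Illustrative only: the source range a statement covers, written with the
// renamed accessors used throughout the hunks above and below.
static clang::SourceRange rangeOf(const clang::Stmt *S) {
  return clang::SourceRange(S->getBeginLoc(), S->getEndLoc());
}
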
diff --git a/lib/ARCMigrate/TransAutoreleasePool.cpp b/lib/ARCMigrate/TransAutoreleasePool.cpp
index 2d35655d186f..9d20774a89a6 100644
--- a/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -128,21 +128,21 @@ public:
Pass.TA.removeStmt(*scope.End);
Stmt::child_iterator retI = scope.End;
++retI;
- SourceLocation afterSemi = findLocationAfterSemi((*retI)->getLocEnd(),
- Pass.Ctx);
+ SourceLocation afterSemi =
+ findLocationAfterSemi((*retI)->getEndLoc(), Pass.Ctx);
assert(afterSemi.isValid() &&
"Didn't we check before setting IsFollowedBySimpleReturnStmt "
"to true?");
Pass.TA.insertAfterToken(afterSemi, "\n}");
Pass.TA.increaseIndentation(
- SourceRange(scope.getIndentedRange().getBegin(),
- (*retI)->getLocEnd()),
- scope.CompoundParent->getLocStart());
+ SourceRange(scope.getIndentedRange().getBegin(),
+ (*retI)->getEndLoc()),
+ scope.CompoundParent->getBeginLoc());
} else {
Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
Pass.TA.replaceStmt(*scope.End, "}");
Pass.TA.increaseIndentation(scope.getIndentedRange(),
- scope.CompoundParent->getLocStart());
+ scope.CompoundParent->getBeginLoc());
}
}
@@ -241,7 +241,7 @@ private:
Stmt::child_iterator rangeE = Begin;
for (Stmt::child_iterator I = rangeS; I != End; ++I)
++rangeE;
- return SourceRange((*rangeS)->getLocStart(), (*rangeE)->getLocEnd());
+ return SourceRange((*rangeS)->getBeginLoc(), (*rangeE)->getEndLoc());
}
};
@@ -256,8 +256,8 @@ private:
SourceLocation &declarationLoc)
: Ctx(ctx), referenceLoc(referenceLoc),
declarationLoc(declarationLoc) {
- ScopeRange = SourceRange((*scope.Begin)->getLocStart(),
- (*scope.End)->getLocStart());
+ ScopeRange = SourceRange((*scope.Begin)->getBeginLoc(),
+ (*scope.End)->getBeginLoc());
}
bool VisitDeclRefExpr(DeclRefExpr *E) {
@@ -307,7 +307,7 @@ private:
if (ReturnStmt *retS = dyn_cast<ReturnStmt>(*SI))
if ((retS->getRetValue() == nullptr ||
isa<DeclRefExpr>(retS->getRetValue()->IgnoreParenCasts())) &&
- findLocationAfterSemi(retS->getLocEnd(), Pass.Ctx).isValid()) {
+ findLocationAfterSemi(retS->getEndLoc(), Pass.Ctx).isValid()) {
scope.IsFollowedBySimpleReturnStmt = true;
++SI; // the return will be included in scope, don't check it.
}
@@ -328,9 +328,9 @@ private:
"NSAutoreleasePool scope that it was declared in", referenceLoc);
Pass.TA.reportNote("name declared here", declarationLoc);
Pass.TA.reportNote("intended @autoreleasepool scope begins here",
- (*scope.Begin)->getLocStart());
+ (*scope.Begin)->getBeginLoc());
Pass.TA.reportNote("intended @autoreleasepool scope ends here",
- (*scope.End)->getLocStart());
+ (*scope.End)->getBeginLoc());
return;
}
}
@@ -403,8 +403,8 @@ private:
return cast<Expr>(getEssential((Stmt*)E));
}
static Stmt *getEssential(Stmt *S) {
- if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
- S = EWC->getSubExpr();
+ if (FullExpr *FE = dyn_cast<FullExpr>(S))
+ S = FE->getSubExpr();
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParenCasts();
return S;
diff --git a/lib/ARCMigrate/TransGCAttrs.cpp b/lib/ARCMigrate/TransGCAttrs.cpp
index fb45cd92c1f6..7697d3f048e6 100644
--- a/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/lib/ARCMigrate/TransGCAttrs.cpp
@@ -81,10 +81,11 @@ public:
}
bool handleAttr(AttributedTypeLoc TL, Decl *D = nullptr) {
- if (TL.getAttrKind() != AttributedType::attr_objc_ownership)
+ auto *OwnershipAttr = TL.getAttrAs<ObjCOwnershipAttr>();
+ if (!OwnershipAttr)
return false;
- SourceLocation Loc = TL.getAttrNameLoc();
+ SourceLocation Loc = OwnershipAttr->getLocation();
unsigned RawLoc = Loc.getRawEncoding();
if (MigrateCtx.AttrSet.count(RawLoc))
return true;
@@ -93,13 +94,7 @@ public:
SourceManager &SM = Ctx.getSourceManager();
if (Loc.isMacroID())
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
- SmallString<32> Buf;
- bool Invalid = false;
- StringRef Spell = Lexer::getSpelling(
- SM.getSpellingLoc(TL.getAttrEnumOperandLoc()),
- Buf, SM, Ctx.getLangOpts(), &Invalid);
- if (Invalid)
- return false;
+ StringRef Spell = OwnershipAttr->getKind()->getName();
MigrationContext::GCAttrOccurrence::AttrKind Kind;
if (Spell == "strong")
Kind = MigrationContext::GCAttrOccurrence::Strong;
@@ -284,7 +279,7 @@ static void checkAllAtProps(MigrationContext &MigrateCtx,
}
for (unsigned i = 0, e = ATLs.size(); i != e; ++i) {
- SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
+ SourceLocation Loc = ATLs[i].first.getAttr()->getLocation();
if (Loc.isMacroID())
Loc = MigrateCtx.Pass.Ctx.getSourceManager()
.getImmediateExpansionRange(Loc)
@@ -340,7 +335,7 @@ void MigrationContext::dumpGCAttrs() {
llvm::errs() << "KIND: "
<< (Attr.Kind == GCAttrOccurrence::Strong ? "strong" : "weak");
llvm::errs() << "\nLOC: ";
- Attr.Loc.dump(Pass.Ctx.getSourceManager());
+ Attr.Loc.print(llvm::errs(), Pass.Ctx.getSourceManager());
llvm::errs() << "\nTYPE: ";
Attr.ModifiedType.dump();
if (Attr.Dcl) {
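
The TransGCAttrs.cpp hunks above reflect the reworked type-attribute representation: an AttributedTypeLoc now hands back the attribute node itself, so the ownership spelling comes from the ObjCOwnershipAttr rather than from re-lexing the token. A small sketch under that reading (the helper name is illustrative):

#include "clang/AST/Attr.h"
#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/StringRef.h"

// Illustrative only: read the ownership spelling ("strong", "weak", ...)
// straight from the attribute attached to the type location.
static llvm::StringRef ownershipSpelling(clang::AttributedTypeLoc TL) {
  if (const auto *A = TL.getAttrAs<clang::ObjCOwnershipAttr>())
    return A->getKind()->getName();
  return llvm::StringRef();
}
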
diff --git a/lib/ARCMigrate/TransGCCalls.cpp b/lib/ARCMigrate/TransGCCalls.cpp
index 3a236d34cd4b..eff142ba3922 100644
--- a/lib/ARCMigrate/TransGCCalls.cpp
+++ b/lib/ARCMigrate/TransGCCalls.cpp
@@ -38,7 +38,7 @@ public:
TransformActions &TA = MigrateCtx.Pass.TA;
if (MigrateCtx.isGCOwnedNonObjC(E->getType())) {
- TA.report(E->getLocStart(), diag::warn_arcmt_nsalloc_realloc,
+ TA.report(E->getBeginLoc(), diag::warn_arcmt_nsalloc_realloc,
E->getSourceRange());
return true;
}
diff --git a/lib/ARCMigrate/TransProtectedScope.cpp b/lib/ARCMigrate/TransProtectedScope.cpp
index 1bb4c1f4e449..bfc542e7497c 100644
--- a/lib/ARCMigrate/TransProtectedScope.cpp
+++ b/lib/ARCMigrate/TransProtectedScope.cpp
@@ -73,12 +73,13 @@ public:
Curr = Curr->getNextSwitchCase();
}
- SourceLocation NextLoc = S->getLocEnd();
+ SourceLocation NextLoc = S->getEndLoc();
Curr = S->getSwitchCaseList();
// We iterate over case statements in reverse source-order.
while (Curr) {
- Cases.push_back(CaseInfo(Curr,SourceRange(Curr->getLocStart(), NextLoc)));
- NextLoc = Curr->getLocStart();
+ Cases.push_back(
+ CaseInfo(Curr, SourceRange(Curr->getBeginLoc(), NextLoc)));
+ NextLoc = Curr->getBeginLoc();
Curr = Curr->getNextSwitchCase();
}
return true;
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index c411044ecde3..d199bb936547 100644
--- a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -71,9 +71,10 @@ public:
// will likely die immediately while previously it was kept alive
// by the autorelease pool. This is bad practice in general, leave it
// and emit an error to force the user to restructure their code.
- Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
+ Pass.TA.reportError(
+ "it is not safe to remove an unused 'autorelease' "
"message; its receiver may be destroyed immediately",
- E->getLocStart(), E->getSourceRange());
+ E->getBeginLoc(), E->getSourceRange());
return true;
}
}
@@ -89,7 +90,7 @@ public:
std::string err = "it is not safe to remove '";
err += E->getSelector().getAsString() + "' message on "
"an __unsafe_unretained type";
- Pass.TA.reportError(err, rec->getLocStart());
+ Pass.TA.reportError(err, rec->getBeginLoc());
return true;
}
@@ -98,18 +99,21 @@ public:
std::string err = "it is not safe to remove '";
err += E->getSelector().getAsString() + "' message on "
"a global variable";
- Pass.TA.reportError(err, rec->getLocStart());
+ Pass.TA.reportError(err, rec->getBeginLoc());
return true;
}
if (E->getMethodFamily() == OMF_release && isDelegateMessage(rec)) {
- Pass.TA.reportError("it is not safe to remove 'retain' "
+ Pass.TA.reportError(
+ "it is not safe to remove 'retain' "
"message on the result of a 'delegate' message; "
"the object that was passed to 'setDelegate:' may not be "
- "properly retained", rec->getLocStart());
+ "properly retained",
+ rec->getBeginLoc());
return true;
}
}
+ break;
case OMF_dealloc:
break;
}
@@ -250,7 +254,7 @@ private:
}
while (OuterS && (isa<ParenExpr>(OuterS) ||
isa<CastExpr>(OuterS) ||
- isa<ExprWithCleanups>(OuterS)));
+ isa<FullExpr>(OuterS)));
if (!OuterS)
return std::make_pair(prevStmt, nextStmt);
@@ -373,8 +377,8 @@ private:
RecContainer = StmtE;
Rec = Init->IgnoreParenImpCasts();
- if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec))
- Rec = EWC->getSubExpr()->IgnoreParenImpCasts();
+ if (FullExpr *FE = dyn_cast<FullExpr>(Rec))
+ Rec = FE->getSubExpr()->IgnoreParenImpCasts();
RecRange = Rec->getSourceRange();
if (SM.isMacroArgExpansion(RecRange.getBegin()))
RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin()));
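
Several transforms above stop special-casing ExprWithCleanups and instead look through FullExpr; my reading is that FullExpr is the new common base shared with ConstantExpr, which the hunks themselves do not state. A minimal sketch of the pattern (the helper name is illustrative):

#include "clang/AST/Expr.h"
#include "llvm/Support/Casting.h"

// Illustrative only: peel off a wrapping full-expression before inspecting
// the interesting sub-expression, as the migrator does above.
static clang::Expr *skipFullExpr(clang::Expr *E) {
  if (auto *FE = llvm::dyn_cast<clang::FullExpr>(E))
    E = FE->getSubExpr();
  return E->IgnoreParenCasts();
}
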
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
index 66167d37ac95..9d46d8c5fcae 100644
--- a/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -209,7 +209,7 @@ private:
// We will remove the compiler diagnostic.
if (!TA.hasDiagnostic(diag::err_arc_mismatched_cast,
diag::err_arc_cast_requires_bridge,
- E->getLocStart())) {
+ E->getBeginLoc())) {
Trans.abort();
return;
}
@@ -225,13 +225,12 @@ private:
}
TA.clearDiagnostic(diag::err_arc_mismatched_cast,
- diag::err_arc_cast_requires_bridge,
- E->getLocStart());
+ diag::err_arc_cast_requires_bridge, E->getBeginLoc());
if (Kind == OBC_Bridge || !Pass.CFBridgingFunctionsDefined()) {
if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
TA.insertAfterToken(CCE->getLParenLoc(), bridge);
} else {
- SourceLocation insertLoc = E->getSubExpr()->getLocStart();
+ SourceLocation insertLoc = E->getSubExpr()->getBeginLoc();
SmallString<128> newCast;
newCast += '(';
newCast += bridge;
@@ -243,7 +242,7 @@ private:
} else {
newCast += '(';
TA.insert(insertLoc, newCast.str());
- TA.insertAfterToken(E->getLocEnd(), ")");
+ TA.insertAfterToken(E->getEndLoc(), ")");
}
}
} else {
@@ -251,7 +250,7 @@ private:
SmallString<32> BridgeCall;
Expr *WrapE = E->getSubExpr();
- SourceLocation InsertLoc = WrapE->getLocStart();
+ SourceLocation InsertLoc = WrapE->getBeginLoc();
SourceManager &SM = Pass.Ctx.getSourceManager();
char PrevChar = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
@@ -268,7 +267,7 @@ private:
} else {
BridgeCall += '(';
TA.insert(InsertLoc, BridgeCall);
- TA.insertAfterToken(WrapE->getLocEnd(), ")");
+ TA.insertAfterToken(WrapE->getEndLoc(), ")");
}
}
}
@@ -368,19 +367,19 @@ private:
err += family == OMF_autorelease ? "autorelease" : "release";
err += "' message; a __bridge cast may result in a pointer to a "
"destroyed object and a __bridge_retained may leak the object";
- Pass.TA.reportError(err, E->getLocStart(),
+ Pass.TA.reportError(err, E->getBeginLoc(),
E->getSubExpr()->getSourceRange());
Stmt *parent = E;
do {
parent = StmtMap->getParentIgnoreParenImpCasts(parent);
- } while (parent && isa<ExprWithCleanups>(parent));
+ } while (parent && isa<FullExpr>(parent));
if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
std::string note = "remove the cast and change return type of function "
"to '";
note += E->getSubExpr()->getType().getAsString(Pass.Ctx.getPrintingPolicy());
note += "' to have the object automatically autoreleased";
- Pass.TA.reportNote(note, retS->getLocStart());
+ Pass.TA.reportNote(note, retS->getBeginLoc());
}
}
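
Most of the churn in these ARCMigrate files is the mechanical rename of the source-location accessors: getLocStart()/getLocEnd() become getBeginLoc()/getEndLoc() on Stmt, Expr, and Decl. A minimal sketch of the new spelling (the helper name is illustrative, not part of this patch):

    // Hypothetical helper; only the getBeginLoc()/getEndLoc() calls reflect
    // the renamed API used throughout the hunks above.
    #include "clang/AST/Expr.h"
    #include "clang/Basic/SourceLocation.h"
    using namespace clang;

    static SourceRange exprRange(const Expr *E) {
      return SourceRange(E->getBeginLoc(), E->getEndLoc());
    }
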
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
index 3f79cc441e85..d1768bc56cfc 100644
--- a/lib/ARCMigrate/TransformActions.cpp
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -350,7 +350,7 @@ void TransformActionsImpl::replaceText(SourceLocation loc, StringRef text,
void TransformActionsImpl::replaceStmt(Stmt *S, StringRef text) {
assert(IsInTransaction && "Actions only allowed during a transaction");
text = getUniqueText(text);
- insert(S->getLocStart(), text);
+ insert(S->getBeginLoc(), text);
removeStmt(S);
}
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index 4a7af2858879..8bd2b407aee9 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -74,8 +74,8 @@ bool trans::isPlusOneAssign(const BinaryOperator *E) {
bool trans::isPlusOne(const Expr *E) {
if (!E)
return false;
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E))
- E = EWC->getSubExpr();
+ if (const FullExpr *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
if (const ObjCMessageExpr *
ME = dyn_cast<ObjCMessageExpr>(E->IgnoreParenCasts()))
@@ -359,7 +359,7 @@ MigrationContext::~MigrationContext() {
bool MigrationContext::isGCOwnedNonObjC(QualType T) {
while (!T.isNull()) {
if (const AttributedType *AttrT = T->getAs<AttributedType>()) {
- if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership)
+ if (AttrT->getAttrKind() == attr::ObjCOwnership)
return !AttrT->getModifiedType()->isObjCRetainableType();
}
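
The other substitution that recurs in these passes is ExprWithCleanups -> FullExpr: FullExpr is the common base of ExprWithCleanups and ConstantExpr, so code that strips a full-expression wrapper now handles both kinds. A hedged sketch of that unwrap step (the helper name is made up):

    // Strip a full-expression wrapper, then look through parens and implicit
    // casts, mirroring the dyn_cast<FullExpr> pattern in the hunks above.
    #include "clang/AST/Expr.h"
    using namespace clang;

    static Expr *stripFullExpr(Expr *E) {
      if (auto *FE = dyn_cast<FullExpr>(E))
        E = FE->getSubExpr();
      return E->IgnoreParenImpCasts();
    }
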
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index c45b52a65a4d..c05b160b8e3d 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -416,18 +416,26 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
<< GetApproxValue(getComplexFloatImag()) << "i";
return;
case APValue::LValue: {
- LValueBase Base = getLValueBase();
- if (!Base) {
- Out << "0";
- return;
- }
-
bool IsReference = Ty->isReferenceType();
QualType InnerTy
= IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
if (InnerTy.isNull())
InnerTy = Ty;
+ LValueBase Base = getLValueBase();
+ if (!Base) {
+ if (isNullPointer()) {
+ Out << (Ctx.getLangOpts().CPlusPlus11 ? "nullptr" : "0");
+ } else if (IsReference) {
+ Out << "*(" << InnerTy.stream(Ctx.getPrintingPolicy()) << "*)"
+ << getLValueOffset().getQuantity();
+ } else {
+ Out << "(" << Ty.stream(Ctx.getPrintingPolicy()) << ")"
+ << getLValueOffset().getQuantity();
+ }
+ return;
+ }
+
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index d50f4493788a..21b6f36e9aa7 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -48,6 +48,7 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -192,7 +193,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
isa<ObjCPropertyDecl>(D) ||
isa<RedeclarableTemplateDecl>(D) ||
isa<ClassTemplateSpecializationDecl>(D))
- DeclLoc = D->getLocStart();
+ DeclLoc = D->getBeginLoc();
else {
DeclLoc = D->getLocation();
if (DeclLoc.isMacroID()) {
@@ -200,7 +201,7 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
// If location of the typedef name is in a macro, it is because being
// declared via a macro. Try using declaration's starting location as
// the "declaration location".
- DeclLoc = D->getLocStart();
+ DeclLoc = D->getBeginLoc();
} else if (const auto *TD = dyn_cast<TagDecl>(D)) {
// If location of the tag decl is inside a macro, but the spelling of
// the tag name comes from a macro argument, it looks like a special
@@ -795,11 +796,10 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
CompCategories(this_()), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
+ TraversalScope = {TUDecl};
}
ASTContext::~ASTContext() {
- ReleaseParentMapEntries();
-
// Release the DenseMaps associated with DeclContext objects.
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
@@ -837,22 +837,80 @@ ASTContext::~ASTContext() {
Value.second->~PerModuleInitializers();
}
-void ASTContext::ReleaseParentMapEntries() {
- if (!PointerParents) return;
- for (const auto &Entry : *PointerParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
+class ASTContext::ParentMap {
+ /// Contains parents of a node.
+ using ParentVector = llvm::SmallVector<ast_type_traits::DynTypedNode, 2>;
+
+ /// Maps from a node to its parents. This is used for nodes that have
+ /// pointer identity only, which are more common and we can save space by
+ /// only storing a unique pointer to them.
+ using ParentMapPointers = llvm::DenseMap<
+ const void *,
+ llvm::PointerUnion4<const Decl *, const Stmt *,
+ ast_type_traits::DynTypedNode *, ParentVector *>>;
+
+ /// Parent map for nodes without pointer identity. We store a full
+ /// DynTypedNode for all keys.
+ using ParentMapOtherNodes = llvm::DenseMap<
+ ast_type_traits::DynTypedNode,
+ llvm::PointerUnion4<const Decl *, const Stmt *,
+ ast_type_traits::DynTypedNode *, ParentVector *>>;
+
+ ParentMapPointers PointerParents;
+ ParentMapOtherNodes OtherParents;
+ class ASTVisitor;
+
+ static ast_type_traits::DynTypedNode
+ getSingleDynTypedNodeFromParentMap(ParentMapPointers::mapped_type U) {
+ if (const auto *D = U.dyn_cast<const Decl *>())
+ return ast_type_traits::DynTypedNode::create(*D);
+ if (const auto *S = U.dyn_cast<const Stmt *>())
+ return ast_type_traits::DynTypedNode::create(*S);
+ return *U.get<ast_type_traits::DynTypedNode *>();
+ }
+
+ template <typename NodeTy, typename MapTy>
+ static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
+ const MapTy &Map) {
+ auto I = Map.find(Node);
+ if (I == Map.end()) {
+ return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
+ }
+ if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
+ return llvm::makeArrayRef(*V);
+ }
+ return getSingleDynTypedNodeFromParentMap(I->second);
+ }
+
+public:
+ ParentMap(ASTContext &Ctx);
+ ~ParentMap() {
+ for (const auto &Entry : PointerParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
}
- }
- for (const auto &Entry : *OtherParents) {
- if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
- delete Entry.second.get<ast_type_traits::DynTypedNode *>();
- } else if (Entry.second.is<ParentVector *>()) {
- delete Entry.second.get<ParentVector *>();
+ for (const auto &Entry : OtherParents) {
+ if (Entry.second.is<ast_type_traits::DynTypedNode *>()) {
+ delete Entry.second.get<ast_type_traits::DynTypedNode *>();
+ } else if (Entry.second.is<ParentVector *>()) {
+ delete Entry.second.get<ParentVector *>();
+ }
}
}
+
+ DynTypedNodeList getParents(const ast_type_traits::DynTypedNode &Node) {
+ if (Node.getNodeKind().hasPointerIdentity())
+ return getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
+ return getDynNodeFromMap(Node, OtherParents);
+ }
+};
+
+void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
+ TraversalScope = TopLevelDecls;
+ Parents.reset();
}
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
@@ -885,7 +943,9 @@ void ASTContext::PrintStats() const {
#define TYPE(Name, Parent) \
if (counts[Idx]) \
llvm::errs() << " " << counts[Idx] << " " << #Name \
- << " types\n"; \
+ << " types, " << sizeof(Name##Type) << " each " \
+ << "(" << counts[Idx] * sizeof(Name##Type) \
+ << " bytes)\n"; \
TotalBytes += counts[Idx] * sizeof(Name##Type); \
++Idx;
#define ABSTRACT_TYPE(Name, Parent)
@@ -929,14 +989,11 @@ void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
if (auto *Listener = getASTMutationListener())
Listener->RedefinedHiddenDefinition(ND, M);
- if (getLangOpts().ModulesLocalVisibility)
- MergedDefModules[ND].push_back(M);
- else
- ND->setVisibleDespiteOwningModule();
+ MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}
void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
- auto It = MergedDefModules.find(ND);
+ auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
if (It == MergedDefModules.end())
return;
@@ -1241,6 +1298,10 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
+
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ InitBuiltinType(Id##Ty, BuiltinType::Id);
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
// Builtin type for __objc_yes and __objc_no
@@ -1892,6 +1953,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
AS = getTargetAddressSpace(
Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
Width = Target->getPointerWidth(AS);
@@ -2295,12 +2359,11 @@ structHasUniqueObjectRepresentations(const ASTContext &Context,
}
}
- llvm::sort(
- Bases.begin(), Bases.end(), [&](const std::pair<QualType, int64_t> &L,
- const std::pair<QualType, int64_t> &R) {
- return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
- Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
- });
+ llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
+ const std::pair<QualType, int64_t> &R) {
+ return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
+ Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
+ });
for (const auto Base : Bases) {
int64_t BaseOffset = Context.toBits(
@@ -2500,21 +2563,24 @@ const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
/// Get the copy initialization expression of VarDecl, or nullptr if
/// none exists.
-Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
+ASTContext::BlockVarCopyInit
+ASTContext::getBlockVarCopyInit(const VarDecl*VD) const {
assert(VD && "Passed null params");
assert(VD->hasAttr<BlocksAttr>() &&
"getBlockVarCopyInits - not __block var");
- llvm::DenseMap<const VarDecl*, Expr*>::iterator
- I = BlockVarCopyInits.find(VD);
- return (I != BlockVarCopyInits.end()) ? I->second : nullptr;
+ auto I = BlockVarCopyInits.find(VD);
+ if (I != BlockVarCopyInits.end())
+ return I->second;
+ return {nullptr, false};
}
/// Set the copy initialization expression of a block var decl.
-void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
- assert(VD && Init && "Passed null params");
+void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr,
+ bool CanThrow) {
+ assert(VD && CopyExpr && "Passed null params");
assert(VD->hasAttr<BlocksAttr>() &&
"setBlockVarCopyInits - not __block var");
- BlockVarCopyInits[VD] = Init;
+ BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
@@ -3695,30 +3761,20 @@ QualType ASTContext::getFunctionTypeInternal(
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- // FunctionProtoType objects are allocated with extra bytes after
- // them for three variable size arrays at the end:
- // - parameter types
- // - exception types
- // - extended parameter information
- // Instead of the exception types, there could be a noexcept
- // expression, or information used to resolve the exception
- // specification.
- size_t Size =
- sizeof(FunctionProtoType) + NumArgs * sizeof(QualType) +
- FunctionProtoType::getExceptionSpecSize(
- EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
-
- // Put the ExtParameterInfos last. If all were equal, it would make
- // more sense to put these before the exception specification, because
- // it's much easier to skip past them compared to the elaborate switch
- // required to skip the exception specification. However, all is not
- // equal; ExtParameterInfos are used to model very uncommon features,
- // and it's better not to burden the more common paths.
- if (EPI.ExtParameterInfos) {
- Size += NumArgs * sizeof(FunctionProtoType::ExtParameterInfo);
- }
-
- auto *FTP = (FunctionProtoType *) Allocate(Size, TypeAlignment);
+ // Compute the needed size to hold this FunctionProtoType and the
+ // various trailing objects.
+ auto ESH = FunctionProtoType::getExceptionSpecSize(
+ EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
+ size_t Size = FunctionProtoType::totalSizeToAlloc<
+ QualType, FunctionType::FunctionTypeExtraBitfields,
+ FunctionType::ExceptionType, Expr *, FunctionDecl *,
+ FunctionProtoType::ExtParameterInfo, Qualifiers>(
+ NumArgs, FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
+ ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
+ EPI.ExtParameterInfos ? NumArgs : 0,
+ EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
+
+ auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
FunctionProtoType::ExtProtoInfo newEPI = EPI;
new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
Types.push_back(FTP);
@@ -3870,7 +3926,7 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
return QualType(newType, 0);
}
-QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
+QualType ASTContext::getAttributedType(attr::Kind attrKind,
QualType modifiedType,
QualType equivalentType) {
llvm::FoldingSetNodeID id;
@@ -4130,8 +4186,10 @@ QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
(void)CheckT;
}
- T = new (*this, TypeAlignment)
- ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
+ void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
+ TypeAlignment);
+ T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
+
Types.push_back(T);
ElaboratedTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
@@ -4262,7 +4320,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
Arg = TemplateArgument(ArgType);
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Expr *E = new (*this) DeclRefExpr(
- NTTP, /*enclosing*/false,
+ *this, NTTP, /*enclosing*/ false,
NTTP->getType().getNonLValueExprType(*this),
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
@@ -4542,8 +4600,8 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
if (!protocols.empty()) {
// Apply the protocol qualifers.
bool hasError;
- Canonical = applyObjCProtocolQualifiers(Canonical, protocols, hasError,
- true/*allowOnPointerType*/);
+ Canonical = getCanonicalType(applyObjCProtocolQualifiers(
+ Canonical, protocols, hasError, true /*allowOnPointerType*/));
assert(!hasError && "Error when apply protocol qualifier to bound type");
}
}
@@ -5098,7 +5156,7 @@ bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
return true;
}
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
if (T1OPType && T2OPType) {
@@ -5762,50 +5820,86 @@ int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
}
TypedefDecl *ASTContext::getCFConstantStringDecl() const {
- if (!CFConstantStringTypeDecl) {
- assert(!CFConstantStringTagDecl &&
- "tag and typedef should be initialized together");
- CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
- CFConstantStringTagDecl->startDefinition();
-
- QualType FieldTypes[4];
- const char *FieldNames[4];
-
- // const int *isa;
- FieldTypes[0] = getPointerType(IntTy.withConst());
- FieldNames[0] = "isa";
- // int flags;
- FieldTypes[1] = IntTy;
- FieldNames[1] = "flags";
- // const char *str;
- FieldTypes[2] = getPointerType(CharTy.withConst());
- FieldNames[2] = "str";
- // long length;
- FieldTypes[3] = LongTy;
- FieldNames[3] = "length";
-
- // Create fields
- for (unsigned i = 0; i < 4; ++i) {
- FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTagDecl,
- SourceLocation(),
- SourceLocation(),
- &Idents.get(FieldNames[i]),
- FieldTypes[i], /*TInfo=*/nullptr,
- /*BitWidth=*/nullptr,
- /*Mutable=*/false,
- ICIS_NoInit);
- Field->setAccess(AS_public);
- CFConstantStringTagDecl->addDecl(Field);
- }
+ if (CFConstantStringTypeDecl)
+ return CFConstantStringTypeDecl;
+
+ assert(!CFConstantStringTagDecl &&
+ "tag and typedef should be initialized together");
+ CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
+ CFConstantStringTagDecl->startDefinition();
- CFConstantStringTagDecl->completeDefinition();
- // This type is designed to be compatible with NSConstantString, but cannot
- // use the same name, since NSConstantString is an interface.
- auto tagType = getTagDeclType(CFConstantStringTagDecl);
- CFConstantStringTypeDecl =
- buildImplicitTypedef(tagType, "__NSConstantString");
+ struct {
+ QualType Type;
+ const char *Name;
+ } Fields[5];
+ unsigned Count = 0;
+
+ /// Objective-C ABI
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// const int *isa;
+ /// int flags;
+ /// const char *str;
+ /// long length;
+ /// } __NSConstantString;
+ ///
+ /// Swift ABI (4.1, 4.2)
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// uintptr_t _cfisa;
+ /// uintptr_t _swift_rc;
+ /// _Atomic(uint64_t) _cfinfoa;
+ /// const char *_ptr;
+ /// uint32_t _length;
+ /// } __NSConstantString;
+ ///
+ /// Swift ABI (5.0)
+ ///
+ /// typedef struct __NSConstantString_tag {
+ /// uintptr_t _cfisa;
+ /// uintptr_t _swift_rc;
+ /// _Atomic(uint64_t) _cfinfoa;
+ /// const char *_ptr;
+ /// uintptr_t _length;
+ /// } __NSConstantString;
+
+ const auto CFRuntime = getLangOpts().CFRuntime;
+ if (static_cast<unsigned>(CFRuntime) <
+ static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
+ Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
+ Fields[Count++] = { IntTy, "flags" };
+ Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
+ Fields[Count++] = { LongTy, "length" };
+ } else {
+ Fields[Count++] = { getUIntPtrType(), "_cfisa" };
+ Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
+    Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
+    Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
+    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
+        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
+      Fields[Count++] = { IntTy, "_length" };
+    else
+      Fields[Count++] = { getUIntPtrType(), "_length" };
}
+ // Create fields
+ for (unsigned i = 0; i < Count; ++i) {
+ FieldDecl *Field =
+ FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
+ SourceLocation(), &Idents.get(Fields[i].Name),
+ Fields[i].Type, /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
+ Field->setAccess(AS_public);
+ CFConstantStringTagDecl->addDecl(Field);
+ }
+
+ CFConstantStringTagDecl->completeDefinition();
+ // This type is designed to be compatible with NSConstantString, but cannot
+ // use the same name, since NSConstantString is an interface.
+ auto tagType = getTagDeclType(CFConstantStringTagDecl);
+ CFConstantStringTypeDecl =
+ buildImplicitTypedef(tagType, "__NSConstantString");
+
return CFConstantStringTypeDecl;
}
@@ -5959,7 +6053,7 @@ LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
bool ASTContext::BlockRequiresCopying(QualType Ty,
const VarDecl *D) {
if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
- const Expr *copyExpr = getBlockVarCopyInits(D);
+ const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr();
if (!copyExpr && record->hasTrivialDestructor()) return false;
return true;
@@ -5999,7 +6093,7 @@ bool ASTContext::BlockRequiresCopying(QualType Ty,
bool ASTContext::getByrefLifetime(QualType Ty,
Qualifiers::ObjCLifetime &LifeTime,
bool &HasByrefExtendedLayout) const {
- if (!getLangOpts().ObjC1 ||
+ if (!getLangOpts().ObjC ||
getLangOpts().getGC() != LangOptions::NonGC)
return false;
@@ -6471,6 +6565,9 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
@@ -7677,7 +7774,7 @@ Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
if (getLangOpts().getGC() == LangOptions::NonGC)
return Qualifiers::GCNone;
- assert(getLangOpts().ObjC1);
+ assert(getLangOpts().ObjC);
Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
// Default behaviour under objective-C's gc is for ObjC pointers
@@ -8033,7 +8130,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
// Also add the protocols associated with the LHS interface.
Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
- // Add all of the protocls for the RHS.
+ // Add all of the protocols for the RHS.
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
// Start with the protocol qualifiers.
@@ -9384,9 +9481,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// qualified with an address space.
char *End;
unsigned AddrSpace = strtoul(Str, &End, 10);
- if (End != Str && AddrSpace != 0) {
- Type = Context.getAddrSpaceQualType(Type,
- getLangASFromTargetAS(AddrSpace));
+ if (End != Str) {
+ // Note AddrSpace == 0 is not the same as an unspecified address space.
+ Type = Context.getAddrSpaceQualType(
+ Type,
+ Context.getLangASForBuiltinAddressSpace(AddrSpace));
Str = End;
}
if (c == '*')
@@ -9717,6 +9816,14 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
cast<FunctionDecl>(D)->getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition;
+ // Implicit member function definitions, such as operator= might not be
+ // marked as template specializations, since they're not coming from a
+ // template but synthesized directly on the class.
+ IsExpInstDef |=
+ isa<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->getParent()->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+
if (getExternalSource()->DeclIsFromPCHWithObjectFile(D) && !IsExpInstDef)
return false;
}
@@ -9766,6 +9873,12 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
const auto *VD = cast<VarDecl>(D);
assert(VD->isFileVarDecl() && "Expected file scoped var");
+ // If the decl is marked as `declare target to`, it should be emitted for the
+ // host and for the device.
+ if (LangOpts.OpenMP &&
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ return true;
+
if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
!isMSStaticDataMemberInlineDefinition(VD))
return false;
@@ -9797,27 +9910,18 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
if (DeclMustBeEmitted(BindingVD))
return true;
- // If the decl is marked as `declare target`, it should be emitted.
- for (const auto *Decl : D->redecls()) {
- if (!Decl->hasAttrs())
- continue;
- if (const auto *Attr = Decl->getAttr<OMPDeclareTargetDeclAttr>())
- if (Attr->getMapType() != OMPDeclareTargetDeclAttr::MT_Link)
- return true;
- }
-
return false;
}
void ASTContext::forEachMultiversionedFunctionVersion(
const FunctionDecl *FD,
- llvm::function_ref<void(const FunctionDecl *)> Pred) const {
+ llvm::function_ref<void(FunctionDecl *)> Pred) const {
assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
- FD = FD->getCanonicalDecl();
+ FD = FD->getMostRecentDecl();
for (auto *CurDecl :
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
- FunctionDecl *CurFD = CurDecl->getAsFunction()->getCanonicalDecl();
+ FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
SeenDecls.insert(CurFD);
@@ -10054,21 +10158,10 @@ bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
-static ast_type_traits::DynTypedNode getSingleDynTypedNodeFromParentMap(
- ASTContext::ParentMapPointers::mapped_type U) {
- if (const auto *D = U.dyn_cast<const Decl *>())
- return ast_type_traits::DynTypedNode::create(*D);
- if (const auto *S = U.dyn_cast<const Stmt *>())
- return ast_type_traits::DynTypedNode::create(*S);
- return *U.get<ast_type_traits::DynTypedNode *>();
-}
-
-namespace {
-
/// Template specializations to abstract away from pointers and TypeLocs.
/// @{
template <typename T>
-ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
+static ast_type_traits::DynTypedNode createDynTypedNode(const T &Node) {
return ast_type_traits::DynTypedNode::create(*Node);
}
template <>
@@ -10082,160 +10175,121 @@ createDynTypedNode(const NestedNameSpecifierLoc &Node) {
}
/// @}
- /// A \c RecursiveASTVisitor that builds a map from nodes to their
- /// parents as defined by the \c RecursiveASTVisitor.
- ///
- /// Note that the relationship described here is purely in terms of AST
- /// traversal - there are other relationships (for example declaration context)
- /// in the AST that are better modeled by special matchers.
- ///
- /// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
- class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
- public:
- /// Builds and returns the translation unit's parent map.
- ///
- /// The caller takes ownership of the returned \c ParentMap.
- static std::pair<ASTContext::ParentMapPointers *,
- ASTContext::ParentMapOtherNodes *>
- buildMap(TranslationUnitDecl &TU) {
- ParentMapASTVisitor Visitor(new ASTContext::ParentMapPointers,
- new ASTContext::ParentMapOtherNodes);
- Visitor.TraverseDecl(&TU);
- return std::make_pair(Visitor.Parents, Visitor.OtherParents);
- }
+/// A \c RecursiveASTVisitor that builds a map from nodes to their
+/// parents as defined by the \c RecursiveASTVisitor.
+///
+/// Note that the relationship described here is purely in terms of AST
+/// traversal - there are other relationships (for example declaration context)
+/// in the AST that are better modeled by special matchers.
+///
+/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
+class ASTContext::ParentMap::ASTVisitor
+ : public RecursiveASTVisitor<ASTVisitor> {
+public:
+ ASTVisitor(ParentMap &Map) : Map(Map) {}
- private:
- friend class RecursiveASTVisitor<ParentMapASTVisitor>;
+private:
+ friend class RecursiveASTVisitor<ASTVisitor>;
- using VisitorBase = RecursiveASTVisitor<ParentMapASTVisitor>;
+ using VisitorBase = RecursiveASTVisitor<ASTVisitor>;
- ParentMapASTVisitor(ASTContext::ParentMapPointers *Parents,
- ASTContext::ParentMapOtherNodes *OtherParents)
- : Parents(Parents), OtherParents(OtherParents) {}
+ bool shouldVisitTemplateInstantiations() const { return true; }
- bool shouldVisitTemplateInstantiations() const {
- return true;
- }
+ bool shouldVisitImplicitCode() const { return true; }
- bool shouldVisitImplicitCode() const {
+ template <typename T, typename MapNodeTy, typename BaseTraverseFn,
+ typename MapTy>
+ bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
+ MapTy *Parents) {
+ if (!Node)
return true;
- }
-
- template <typename T, typename MapNodeTy, typename BaseTraverseFn,
- typename MapTy>
- bool TraverseNode(T Node, MapNodeTy MapNode,
- BaseTraverseFn BaseTraverse, MapTy *Parents) {
- if (!Node)
- return true;
- if (ParentStack.size() > 0) {
- // FIXME: Currently we add the same parent multiple times, but only
- // when no memoization data is available for the type.
- // For example when we visit all subexpressions of template
- // instantiations; this is suboptimal, but benign: the only way to
- // visit those is with hasAncestor / hasParent, and those do not create
- // new matches.
- // The plan is to enable DynTypedNode to be storable in a map or hash
- // map. The main problem there is to implement hash functions /
- // comparison operators for all types that DynTypedNode supports that
- // do not have pointer identity.
- auto &NodeOrVector = (*Parents)[MapNode];
- if (NodeOrVector.isNull()) {
- if (const auto *D = ParentStack.back().get<Decl>())
- NodeOrVector = D;
- else if (const auto *S = ParentStack.back().get<Stmt>())
- NodeOrVector = S;
- else
- NodeOrVector =
- new ast_type_traits::DynTypedNode(ParentStack.back());
- } else {
- if (!NodeOrVector.template is<ASTContext::ParentVector *>()) {
- auto *Vector = new ASTContext::ParentVector(
- 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
- delete NodeOrVector
- .template dyn_cast<ast_type_traits::DynTypedNode *>();
- NodeOrVector = Vector;
- }
-
- auto *Vector =
- NodeOrVector.template get<ASTContext::ParentVector *>();
- // Skip duplicates for types that have memoization data.
- // We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
- // types.
- bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
- if (!Found)
- Vector->push_back(ParentStack.back());
+ if (ParentStack.size() > 0) {
+ // FIXME: Currently we add the same parent multiple times, but only
+ // when no memoization data is available for the type.
+ // For example when we visit all subexpressions of template
+ // instantiations; this is suboptimal, but benign: the only way to
+ // visit those is with hasAncestor / hasParent, and those do not create
+ // new matches.
+ // The plan is to enable DynTypedNode to be storable in a map or hash
+ // map. The main problem there is to implement hash functions /
+ // comparison operators for all types that DynTypedNode supports that
+ // do not have pointer identity.
+ auto &NodeOrVector = (*Parents)[MapNode];
+ if (NodeOrVector.isNull()) {
+ if (const auto *D = ParentStack.back().get<Decl>())
+ NodeOrVector = D;
+ else if (const auto *S = ParentStack.back().get<Stmt>())
+ NodeOrVector = S;
+ else
+ NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
+ } else {
+ if (!NodeOrVector.template is<ParentVector *>()) {
+ auto *Vector = new ParentVector(
+ 1, getSingleDynTypedNodeFromParentMap(NodeOrVector));
+ delete NodeOrVector
+ .template dyn_cast<ast_type_traits::DynTypedNode *>();
+ NodeOrVector = Vector;
}
- }
- ParentStack.push_back(createDynTypedNode(Node));
- bool Result = BaseTraverse();
- ParentStack.pop_back();
- return Result;
- }
- bool TraverseDecl(Decl *DeclNode) {
- return TraverseNode(DeclNode, DeclNode,
- [&] { return VisitorBase::TraverseDecl(DeclNode); },
- Parents);
+ auto *Vector = NodeOrVector.template get<ParentVector *>();
+ // Skip duplicates for types that have memoization data.
+ // We must check that the type has memoization data before calling
+ // std::find() because DynTypedNode::operator== can't compare all
+ // types.
+ bool Found = ParentStack.back().getMemoizationData() &&
+ std::find(Vector->begin(), Vector->end(),
+ ParentStack.back()) != Vector->end();
+ if (!Found)
+ Vector->push_back(ParentStack.back());
+ }
}
+ ParentStack.push_back(createDynTypedNode(Node));
+ bool Result = BaseTraverse();
+ ParentStack.pop_back();
+ return Result;
+ }
- bool TraverseStmt(Stmt *StmtNode) {
- return TraverseNode(StmtNode, StmtNode,
- [&] { return VisitorBase::TraverseStmt(StmtNode); },
- Parents);
- }
+ bool TraverseDecl(Decl *DeclNode) {
+ return TraverseNode(
+ DeclNode, DeclNode, [&] { return VisitorBase::TraverseDecl(DeclNode); },
+ &Map.PointerParents);
+ }
- bool TraverseTypeLoc(TypeLoc TypeLocNode) {
- return TraverseNode(
- TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
- [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
- OtherParents);
- }
+ bool TraverseStmt(Stmt *StmtNode) {
+ return TraverseNode(
+ StmtNode, StmtNode, [&] { return VisitorBase::TraverseStmt(StmtNode); },
+ &Map.PointerParents);
+ }
- bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
- return TraverseNode(
- NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
- [&] {
- return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode);
- },
- OtherParents);
- }
+ bool TraverseTypeLoc(TypeLoc TypeLocNode) {
+ return TraverseNode(
+ TypeLocNode, ast_type_traits::DynTypedNode::create(TypeLocNode),
+ [&] { return VisitorBase::TraverseTypeLoc(TypeLocNode); },
+ &Map.OtherParents);
+ }
- ASTContext::ParentMapPointers *Parents;
- ASTContext::ParentMapOtherNodes *OtherParents;
- llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
- };
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNSLocNode) {
+ return TraverseNode(
+ NNSLocNode, ast_type_traits::DynTypedNode::create(NNSLocNode),
+ [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
+ &Map.OtherParents);
+ }
-} // namespace
+ ParentMap &Map;
+ llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
+};
-template <typename NodeTy, typename MapTy>
-static ASTContext::DynTypedNodeList getDynNodeFromMap(const NodeTy &Node,
- const MapTy &Map) {
- auto I = Map.find(Node);
- if (I == Map.end()) {
- return llvm::ArrayRef<ast_type_traits::DynTypedNode>();
- }
- if (const auto *V =
- I->second.template dyn_cast<ASTContext::ParentVector *>()) {
- return llvm::makeArrayRef(*V);
- }
- return getSingleDynTypedNodeFromParentMap(I->second);
+ASTContext::ParentMap::ParentMap(ASTContext &Ctx) {
+ ASTVisitor(*this).TraverseAST(Ctx);
}
ASTContext::DynTypedNodeList
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
- if (!PointerParents) {
- // We always need to run over the whole translation unit, as
+ if (!Parents)
+ // We build the parent map for the traversal scope (usually whole TU), as
// hasAncestor can escape any subtree.
- auto Maps = ParentMapASTVisitor::buildMap(*getTranslationUnitDecl());
- PointerParents.reset(Maps.first);
- OtherParents.reset(Maps.second);
- }
- if (Node.getNodeKind().hasPointerIdentity())
- return getDynNodeFromMap(Node.getMemoizationData(), *PointerParents);
- return getDynNodeFromMap(Node, *OtherParents);
+ Parents = llvm::make_unique<ParentMap>(*this);
+ return Parents->getParents(Node);
}
bool
@@ -10322,6 +10376,16 @@ QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
}
}
+LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
+ if (LangOpts.OpenCL)
+ return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);
+
+ if (LangOpts.CUDA)
+ return getTargetInfo().getCUDABuiltinAddressSpace(AS);
+
+ return getLangASFromTargetAS(AS);
+}
+
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
@@ -10419,3 +10483,22 @@ unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
return 0;
}
}
+
+FixedPointSemantics ASTContext::getFixedPointSemantics(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+ bool isSigned = Ty->isSignedFixedPointType();
+ return FixedPointSemantics(
+ static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
+ Ty->isSaturatedFixedPointType(),
+ !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
+}
+
+APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+ return APFixedPoint::getMax(getFixedPointSemantics(Ty));
+}
+
+APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
+ assert(Ty->isFixedPointType());
+ return APFixedPoint::getMin(getFixedPointSemantics(Ty));
+}
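
The largest functional change in ASTContext.cpp is the parent-map rework: the two raw DenseMaps (PointerParents/OtherParents) move into a private ASTContext::ParentMap that is built lazily over the current traversal scope (the whole TU unless setTraversalScope() narrows it) and discarded whenever that scope changes. Client code is unaffected; getParents() keeps its signature. A hedged usage sketch (the helper and the upward walk are illustrative, not from this patch):

    // Walk from an expression up to its nearest enclosing Decl. The first
    // getParents() call builds the parent map for the traversal scope.
    #include "clang/AST/ASTContext.h"
    #include "clang/AST/ASTTypeTraits.h"
    #include "clang/AST/Expr.h"
    using namespace clang;

    static const Decl *nearestEnclosingDecl(ASTContext &Ctx, const Expr *E) {
      ast_type_traits::DynTypedNode Node =
          ast_type_traits::DynTypedNode::create(*E);
      while (true) {
        auto Parents = Ctx.getParents(Node);
        if (Parents.empty())
          return nullptr;               // escaped the traversal scope
        Node = Parents[0];
        if (const Decl *D = Node.get<Decl>())
          return D;
      }
    }
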
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index 50d2d2999e51..dd0585558572 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -334,6 +334,20 @@ void clang::FormatASTNodeDiagnosticArgument(
switch (Kind) {
default: llvm_unreachable("unknown ArgumentKind");
+ case DiagnosticsEngine::ak_qual: {
+ assert(Modifier.empty() && Argument.empty() &&
+ "Invalid modifier for Qualfiers argument");
+
+ Qualifiers Q(Qualifiers::fromOpaqueValue(Val));
+ auto S = Q.getAsString();
+ if (S.empty()) {
+ OS << "unqualified";
+ NeedQuotes = false;
+ } else {
+ OS << Q.getAsString();
+ }
+ break;
+ }
case DiagnosticsEngine::ak_qualtype_pair: {
TemplateDiffTypes &TDT = *reinterpret_cast<TemplateDiffTypes*>(Val);
QualType FromType =
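
The new ak_qual case teaches the diagnostic formatter to print a Qualifiers argument by name, falling back to the word "unqualified" (left unquoted) when the set is empty. A minimal standalone illustration of the underlying Qualifiers printing (not code from this patch):

    // Prints e.g. "const volatile", or "unqualified" when nothing is set.
    #include "clang/AST/Type.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace clang;

    static void printQuals(Qualifiers Q, llvm::raw_ostream &OS) {
      std::string S = Q.getAsString();
      if (S.empty())
        OS << "unqualified";
      else
        OS << S;
    }
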
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 8a653ecebae3..b52ec21943e6 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -13,7 +13,9 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDumperUtils.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/AttrVisitor.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclLookups.h"
@@ -22,6 +24,8 @@
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateArgumentVisitor.h"
+#include "clang/AST/TextNodeDumper.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Module.h"
@@ -35,177 +39,35 @@ using namespace clang::comments;
//===----------------------------------------------------------------------===//
namespace {
- // Colors used for various parts of the AST dump
- // Do not use bold yellow for any text. It is hard to read on white screens.
- struct TerminalColor {
- raw_ostream::Colors Color;
- bool Bold;
- };
+ class ASTDumper
+ : public ConstDeclVisitor<ASTDumper>,
+ public ConstStmtVisitor<ASTDumper>,
+ public ConstCommentVisitor<ASTDumper, void, const FullComment *>,
+ public TypeVisitor<ASTDumper>,
+ public ConstAttrVisitor<ASTDumper>,
+ public ConstTemplateArgumentVisitor<ASTDumper> {
- // Red - CastColor
- // Green - TypeColor
- // Bold Green - DeclKindNameColor, UndeserializedColor
- // Yellow - AddressColor, LocationColor
- // Blue - CommentColor, NullColor, IndentColor
- // Bold Blue - AttrColor
- // Bold Magenta - StmtColor
- // Cyan - ValueKindColor, ObjectKindColor
- // Bold Cyan - ValueColor, DeclNameColor
-
- // Decl kind names (VarDecl, FunctionDecl, etc)
- static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
- // Attr names (CleanupAttr, GuardedByAttr, etc)
- static const TerminalColor AttrColor = { raw_ostream::BLUE, true };
- // Statement names (DeclStmt, ImplicitCastExpr, etc)
- static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
- // Comment names (FullComment, ParagraphComment, TextComment, etc)
- static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
-
- // Type names (int, float, etc, plus user defined types)
- static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
-
- // Pointer address
- static const TerminalColor AddressColor = { raw_ostream::YELLOW, false };
- // Source locations
- static const TerminalColor LocationColor = { raw_ostream::YELLOW, false };
-
- // lvalue/xvalue
- static const TerminalColor ValueKindColor = { raw_ostream::CYAN, false };
- // bitfield/objcproperty/objcsubscript/vectorcomponent
- static const TerminalColor ObjectKindColor = { raw_ostream::CYAN, false };
-
- // Null statements
- static const TerminalColor NullColor = { raw_ostream::BLUE, false };
-
- // Undeserialized entities
- static const TerminalColor UndeserializedColor = { raw_ostream::GREEN, true };
-
- // CastKind from CastExpr's
- static const TerminalColor CastColor = { raw_ostream::RED, false };
-
- // Value of the statement
- static const TerminalColor ValueColor = { raw_ostream::CYAN, true };
- // Decl names
- static const TerminalColor DeclNameColor = { raw_ostream::CYAN, true };
-
- // Indents ( `, -. | )
- static const TerminalColor IndentColor = { raw_ostream::BLUE, false };
+ TextNodeDumper NodeDumper;
- class ASTDumper
- : public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>,
- public ConstCommentVisitor<ASTDumper>, public TypeVisitor<ASTDumper> {
raw_ostream &OS;
- const CommandTraits *Traits;
- const SourceManager *SM;
/// The policy to use for printing; can be defaulted.
PrintingPolicy PrintPolicy;
- /// Pending[i] is an action to dump an entity at level i.
- llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
-
/// Indicates whether we should trigger deserialization of nodes that had
/// not already been loaded.
bool Deserialize = false;
- /// Indicates whether we're at the top level.
- bool TopLevel = true;
-
- /// Indicates if we're handling the first child after entering a new depth.
- bool FirstChild = true;
-
- /// Prefix for currently-being-dumped entity.
- std::string Prefix;
-
- /// Keep track of the last location we print out so that we can
- /// print out deltas from then on out.
- const char *LastLocFilename = "";
- unsigned LastLocLine = ~0U;
-
- /// The \c FullComment parent of the comment being dumped.
- const FullComment *FC = nullptr;
-
- bool ShowColors;
+ const bool ShowColors;
/// Dump a child of the current node.
- template<typename Fn> void dumpChild(Fn doDumpChild) {
- // If we're at the top level, there's nothing interesting to do; just
- // run the dumper.
- if (TopLevel) {
- TopLevel = false;
- doDumpChild();
- while (!Pending.empty()) {
- Pending.back()(true);
- Pending.pop_back();
- }
- Prefix.clear();
- OS << "\n";
- TopLevel = true;
- return;
- }
-
- const FullComment *OrigFC = FC;
- auto dumpWithIndent = [this, doDumpChild, OrigFC](bool isLastChild) {
- // Print out the appropriate tree structure and work out the prefix for
- // children of this node. For instance:
- //
- // A Prefix = ""
- // |-B Prefix = "| "
- // | `-C Prefix = "| "
- // `-D Prefix = " "
- // |-E Prefix = " | "
- // `-F Prefix = " "
- // G Prefix = ""
- //
- // Note that the first level gets no prefix.
- {
- OS << '\n';
- ColorScope Color(*this, IndentColor);
- OS << Prefix << (isLastChild ? '`' : '|') << '-';
- this->Prefix.push_back(isLastChild ? ' ' : '|');
- this->Prefix.push_back(' ');
- }
-
- FirstChild = true;
- unsigned Depth = Pending.size();
-
- FC = OrigFC;
- doDumpChild();
-
- // If any children are left, they're the last at their nesting level.
- // Dump those ones out now.
- while (Depth < Pending.size()) {
- Pending.back()(true);
- this->Pending.pop_back();
- }
-
- // Restore the old prefix.
- this->Prefix.resize(Prefix.size() - 2);
- };
-
- if (FirstChild) {
- Pending.push_back(std::move(dumpWithIndent));
- } else {
- Pending.back()(false);
- Pending.back() = std::move(dumpWithIndent);
- }
- FirstChild = false;
+ template<typename Fn> void dumpChild(Fn DoDumpChild) {
+ NodeDumper.AddChild(DoDumpChild);
+ }
+ template <typename Fn> void dumpChild(StringRef Label, Fn DoDumpChild) {
+ NodeDumper.AddChild(Label, DoDumpChild);
}
-
- class ColorScope {
- ASTDumper &Dumper;
- public:
- ColorScope(ASTDumper &Dumper, TerminalColor Color)
- : Dumper(Dumper) {
- if (Dumper.ShowColors)
- Dumper.OS.changeColor(Color.Color, Color.Bold);
- }
- ~ColorScope() {
- if (Dumper.ShowColors)
- Dumper.OS.resetColor();
- }
- };
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
@@ -219,40 +81,39 @@ namespace {
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors,
const PrintingPolicy &PrintPolicy)
- : OS(OS), Traits(Traits), SM(SM), PrintPolicy(PrintPolicy),
- ShowColors(ShowColors) {}
+ : NodeDumper(OS, ShowColors, SM, PrintPolicy, Traits), OS(OS),
+ PrintPolicy(PrintPolicy), ShowColors(ShowColors) {}
void setDeserialize(bool D) { Deserialize = D; }
void dumpDecl(const Decl *D);
- void dumpStmt(const Stmt *S);
- void dumpFullComment(const FullComment *C);
+ void dumpStmt(const Stmt *S, StringRef Label = {});
// Utilities
- void dumpPointer(const void *Ptr);
- void dumpSourceRange(SourceRange R);
- void dumpLocation(SourceLocation Loc);
- void dumpBareType(QualType T, bool Desugar = true);
- void dumpType(QualType T);
void dumpTypeAsChild(QualType T);
void dumpTypeAsChild(const Type *T);
- void dumpBareDeclRef(const Decl *Node);
- void dumpDeclRef(const Decl *Node, const char *Label = nullptr);
- void dumpName(const NamedDecl *D);
- bool hasNodes(const DeclContext *DC);
void dumpDeclContext(const DeclContext *DC);
void dumpLookups(const DeclContext *DC, bool DumpDecls);
void dumpAttr(const Attr *A);
// C++ Utilities
- void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCXXCtorInitializer(const CXXCtorInitializer *Init);
void dumpTemplateParameters(const TemplateParameterList *TPL);
void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI);
- void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A);
+ void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
+ const Decl *From = nullptr,
+ const char *Label = nullptr);
void dumpTemplateArgumentList(const TemplateArgumentList &TAL);
void dumpTemplateArgument(const TemplateArgument &A,
- SourceRange R = SourceRange());
+ SourceRange R = SourceRange(),
+ const Decl *From = nullptr,
+ const char *Label = nullptr);
+ template <typename SpecializationDecl>
+ void dumpTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly);
+ template <typename TemplateDecl>
+ void dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
// Objective-C utilities.
void dumpObjCTypeParamList(const ObjCTypeParamList *typeParams);
@@ -261,6 +122,9 @@ namespace {
void VisitComplexType(const ComplexType *T) {
dumpTypeAsChild(T->getElementType());
}
+ void VisitLocInfoType(const LocInfoType *T) {
+ dumpTypeAsChild(T->getTypeSourceInfo()->getType());
+ }
void VisitPointerType(const PointerType *T) {
dumpTypeAsChild(T->getPointeeType());
}
@@ -270,92 +134,39 @@ namespace {
void VisitReferenceType(const ReferenceType *T) {
dumpTypeAsChild(T->getPointeeType());
}
- void VisitRValueReferenceType(const ReferenceType *T) {
- if (T->isSpelledAsLValue())
- OS << " written as lvalue reference";
- VisitReferenceType(T);
- }
void VisitMemberPointerType(const MemberPointerType *T) {
dumpTypeAsChild(T->getClass());
dumpTypeAsChild(T->getPointeeType());
}
void VisitArrayType(const ArrayType *T) {
- switch (T->getSizeModifier()) {
- case ArrayType::Normal: break;
- case ArrayType::Static: OS << " static"; break;
- case ArrayType::Star: OS << " *"; break;
- }
- OS << " " << T->getIndexTypeQualifiers().getAsString();
dumpTypeAsChild(T->getElementType());
}
- void VisitConstantArrayType(const ConstantArrayType *T) {
- OS << " " << T->getSize();
- VisitArrayType(T);
- }
void VisitVariableArrayType(const VariableArrayType *T) {
- OS << " ";
- dumpSourceRange(T->getBracketsRange());
VisitArrayType(T);
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
- VisitArrayType(T);
- OS << " ";
- dumpSourceRange(T->getBracketsRange());
+ dumpTypeAsChild(T->getElementType());
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedExtVectorType(
const DependentSizedExtVectorType *T) {
- OS << " ";
- dumpLocation(T->getAttributeLoc());
dumpTypeAsChild(T->getElementType());
dumpStmt(T->getSizeExpr());
}
void VisitVectorType(const VectorType *T) {
- switch (T->getVectorKind()) {
- case VectorType::GenericVector: break;
- case VectorType::AltiVecVector: OS << " altivec"; break;
- case VectorType::AltiVecPixel: OS << " altivec pixel"; break;
- case VectorType::AltiVecBool: OS << " altivec bool"; break;
- case VectorType::NeonVector: OS << " neon"; break;
- case VectorType::NeonPolyVector: OS << " neon poly"; break;
- }
- OS << " " << T->getNumElements();
dumpTypeAsChild(T->getElementType());
}
void VisitFunctionType(const FunctionType *T) {
- auto EI = T->getExtInfo();
- if (EI.getNoReturn()) OS << " noreturn";
- if (EI.getProducesResult()) OS << " produces_result";
- if (EI.getHasRegParm()) OS << " regparm " << EI.getRegParm();
- OS << " " << FunctionType::getNameForCallConv(EI.getCC());
dumpTypeAsChild(T->getReturnType());
}
void VisitFunctionProtoType(const FunctionProtoType *T) {
- auto EPI = T->getExtProtoInfo();
- if (EPI.HasTrailingReturn) OS << " trailing_return";
- if (T->isConst()) OS << " const";
- if (T->isVolatile()) OS << " volatile";
- if (T->isRestrict()) OS << " restrict";
- switch (EPI.RefQualifier) {
- case RQ_None: break;
- case RQ_LValue: OS << " &"; break;
- case RQ_RValue: OS << " &&"; break;
- }
- // FIXME: Exception specification.
- // FIXME: Consumed parameters.
VisitFunctionType(T);
for (QualType PT : T->getParamTypes())
dumpTypeAsChild(PT);
- if (EPI.Variadic)
+ if (T->getExtProtoInfo().Variadic)
dumpChild([=] { OS << "..."; });
}
- void VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
- dumpDeclRef(T->getDecl());
- }
- void VisitTypedefType(const TypedefType *T) {
- dumpDeclRef(T->getDecl());
- }
void VisitTypeOfExprType(const TypeOfExprType *T) {
dumpStmt(T->getUnderlyingExpr());
}
@@ -363,25 +174,12 @@ namespace {
dumpStmt(T->getUnderlyingExpr());
}
void VisitUnaryTransformType(const UnaryTransformType *T) {
- switch (T->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- OS << " underlying_type";
- break;
- }
dumpTypeAsChild(T->getBaseType());
}
- void VisitTagType(const TagType *T) {
- dumpDeclRef(T->getDecl());
- }
void VisitAttributedType(const AttributedType *T) {
// FIXME: AttrKind
dumpTypeAsChild(T->getModifiedType());
}
- void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
- OS << " depth " << T->getDepth() << " index " << T->getIndex();
- if (T->isParameterPack()) OS << " pack";
- dumpDeclRef(T->getDecl());
- }
void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
dumpTypeAsChild(T->getReplacedParameter());
}
@@ -390,25 +188,12 @@ namespace {
dumpTypeAsChild(T->getReplacedParameter());
dumpTemplateArgument(T->getArgumentPack());
}
- void VisitAutoType(const AutoType *T) {
- if (T->isDecltypeAuto()) OS << " decltype(auto)";
- if (!T->isDeduced())
- OS << " undeduced";
- }
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
- if (T->isTypeAlias()) OS << " alias";
- OS << " "; T->getTemplateName().dump(OS);
for (auto &Arg : *T)
dumpTemplateArgument(Arg);
if (T->isTypeAlias())
dumpTypeAsChild(T->getAliasedType());
}
- void VisitInjectedClassNameType(const InjectedClassNameType *T) {
- dumpDeclRef(T->getDecl());
- }
- void VisitObjCInterfaceType(const ObjCInterfaceType *T) {
- dumpDeclRef(T->getDecl());
- }
void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
dumpTypeAsChild(T->getPointeeType());
}
@@ -422,7 +207,6 @@ namespace {
dumpTypeAsChild(T->getOriginalType());
}
void VisitPackExpansionType(const PackExpansionType *T) {
- if (auto N = T->getNumExpansions()) OS << " expansions " << *N;
if (!T->isSugared())
dumpTypeAsChild(T->getPattern());
}
@@ -450,6 +234,7 @@ namespace {
// OpenMP decls
void VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);
void VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D);
+ void VisitOMPRequiresDecl(const OMPRequiresDecl *D);
void VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D);
// C++ Decls
@@ -460,12 +245,6 @@ namespace {
void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
void VisitCXXRecordDecl(const CXXRecordDecl *D);
void VisitStaticAssertDecl(const StaticAssertDecl *D);
- template<typename SpecializationDecl>
- void VisitTemplateDeclSpecialization(const SpecializationDecl *D,
- bool DumpExplicitInst,
- bool DumpRefOnly);
- template<typename TemplateDecl>
- void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
void VisitClassTemplateDecl(const ClassTemplateDecl *D);
void VisitClassTemplateSpecializationDecl(
@@ -504,96 +283,47 @@ namespace {
void VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D);
void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
+ void Visit(const BlockDecl::Capture &C);
void VisitBlockDecl(const BlockDecl *D);
// Stmts.
- void VisitStmt(const Stmt *Node);
void VisitDeclStmt(const DeclStmt *Node);
void VisitAttributedStmt(const AttributedStmt *Node);
- void VisitLabelStmt(const LabelStmt *Node);
- void VisitGotoStmt(const GotoStmt *Node);
void VisitCXXCatchStmt(const CXXCatchStmt *Node);
void VisitCapturedStmt(const CapturedStmt *Node);
// OpenMP
+ void Visit(const OMPClause *C);
void VisitOMPExecutableDirective(const OMPExecutableDirective *Node);
// Exprs
- void VisitExpr(const Expr *Node);
- void VisitCastExpr(const CastExpr *Node);
- void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
- void VisitDeclRefExpr(const DeclRefExpr *Node);
- void VisitPredefinedExpr(const PredefinedExpr *Node);
- void VisitCharacterLiteral(const CharacterLiteral *Node);
- void VisitIntegerLiteral(const IntegerLiteral *Node);
- void VisitFixedPointLiteral(const FixedPointLiteral *Node);
- void VisitFloatingLiteral(const FloatingLiteral *Node);
- void VisitStringLiteral(const StringLiteral *Str);
void VisitInitListExpr(const InitListExpr *ILE);
- void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *ILE);
- void VisitArrayInitIndexExpr(const ArrayInitIndexExpr *ILE);
- void VisitUnaryOperator(const UnaryOperator *Node);
- void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node);
- void VisitMemberExpr(const MemberExpr *Node);
- void VisitExtVectorElementExpr(const ExtVectorElementExpr *Node);
- void VisitBinaryOperator(const BinaryOperator *Node);
- void VisitCompoundAssignOperator(const CompoundAssignOperator *Node);
- void VisitAddrLabelExpr(const AddrLabelExpr *Node);
void VisitBlockExpr(const BlockExpr *Node);
void VisitOpaqueValueExpr(const OpaqueValueExpr *Node);
void VisitGenericSelectionExpr(const GenericSelectionExpr *E);
// C++
- void VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node);
- void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node);
- void VisitCXXThisExpr(const CXXThisExpr *Node);
- void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node);
- void VisitCXXUnresolvedConstructExpr(const CXXUnresolvedConstructExpr *Node);
- void VisitCXXConstructExpr(const CXXConstructExpr *Node);
- void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node);
- void VisitCXXNewExpr(const CXXNewExpr *Node);
- void VisitCXXDeleteExpr(const CXXDeleteExpr *Node);
- void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
- void VisitExprWithCleanups(const ExprWithCleanups *Node);
- void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
- void dumpCXXTemporary(const CXXTemporary *Temporary);
void VisitLambdaExpr(const LambdaExpr *Node) {
- VisitExpr(Node);
dumpDecl(Node->getLambdaClass());
}
void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
- void
- VisitCXXDependentScopeMemberExpr(const CXXDependentScopeMemberExpr *Node);
// ObjC
void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
- void VisitObjCEncodeExpr(const ObjCEncodeExpr *Node);
- void VisitObjCMessageExpr(const ObjCMessageExpr *Node);
- void VisitObjCBoxedExpr(const ObjCBoxedExpr *Node);
- void VisitObjCSelectorExpr(const ObjCSelectorExpr *Node);
- void VisitObjCProtocolExpr(const ObjCProtocolExpr *Node);
- void VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node);
- void VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node);
- void VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node);
- void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
// Comments.
- const char *getCommandName(unsigned CommandID);
- void dumpComment(const Comment *C);
-
- // Inline comments.
- void visitTextComment(const TextComment *C);
- void visitInlineCommandComment(const InlineCommandComment *C);
- void visitHTMLStartTagComment(const HTMLStartTagComment *C);
- void visitHTMLEndTagComment(const HTMLEndTagComment *C);
-
- // Block comments.
- void visitBlockCommandComment(const BlockCommandComment *C);
- void visitParamCommandComment(const ParamCommandComment *C);
- void visitTParamCommandComment(const TParamCommandComment *C);
- void visitVerbatimBlockComment(const VerbatimBlockComment *C);
- void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
- void visitVerbatimLineComment(const VerbatimLineComment *C);
+ void dumpComment(const Comment *C, const FullComment *FC);
+
+ void VisitExpressionTemplateArgument(const TemplateArgument &TA) {
+ dumpStmt(TA.getAsExpr());
+ }
+ void VisitPackTemplateArgument(const TemplateArgument &TA) {
+ for (const auto &TArg : TA.pack_elements())
+ dumpTemplateArgument(TArg);
+ }
+
+// Implements Visit methods for Attrs.
+#include "clang/AST/AttrNodeTraverse.inc"
};
}
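The net effect of the class changes above is that ASTDumper no longer prints the per-node summary line itself; that responsibility moves into the NodeDumper member (a TextNodeDumper), while ASTDumper keeps only indentation, child traversal, and the Visit overrides that dump extra children. A minimal, self-contained sketch of that split, using toy stand-in types rather than the real clang classes:

#include <iostream>
#include <string>
#include <vector>

// Toy stand-ins; not the real clang classes.
struct Node {
  std::string Kind;
  std::vector<Node> Children;
};

// Plays the role of TextNodeDumper: prints the one-line summary for a node.
struct LinePrinter {
  void Visit(const Node &N) { std::cout << N.Kind; }
};

// Plays the role of ASTDumper: owns indentation and child traversal only.
struct TreeDumper {
  LinePrinter NodeDumper;
  void dump(const Node &N, unsigned Indent = 0) {
    std::cout << std::string(Indent, ' ');
    NodeDumper.Visit(N); // delegate the node's own line
    std::cout << '\n';
    for (const Node &C : N.Children)
      dump(C, Indent + 2); // traversal stays in the dumper
  }
};

int main() {
  TreeDumper().dump(Node{"FunctionDecl", {{"CompoundStmt", {{"ReturnStmt", {}}}}}});
}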
@@ -601,188 +331,31 @@ namespace {
// Utilities
//===----------------------------------------------------------------------===//
-void ASTDumper::dumpPointer(const void *Ptr) {
- ColorScope Color(*this, AddressColor);
- OS << ' ' << Ptr;
-}
-
-void ASTDumper::dumpLocation(SourceLocation Loc) {
- if (!SM)
- return;
-
- ColorScope Color(*this, LocationColor);
- SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
-
- // The general format we print out is filename:line:col, but we drop pieces
- // that haven't changed since the last loc printed.
- PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
-
- if (PLoc.isInvalid()) {
- OS << "<invalid sloc>";
- return;
- }
-
- if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
- OS << PLoc.getFilename() << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
- LastLocFilename = PLoc.getFilename();
- LastLocLine = PLoc.getLine();
- } else if (PLoc.getLine() != LastLocLine) {
- OS << "line" << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
- LastLocLine = PLoc.getLine();
- } else {
- OS << "col" << ':' << PLoc.getColumn();
- }
-}
-
-void ASTDumper::dumpSourceRange(SourceRange R) {
- // Can't translate locations if a SourceManager isn't available.
- if (!SM)
- return;
-
- OS << " <";
- dumpLocation(R.getBegin());
- if (R.getBegin() != R.getEnd()) {
- OS << ", ";
- dumpLocation(R.getEnd());
- }
- OS << ">";
-
- // <t2.c:123:421[blah], t2.c:412:321>
-
-}
-
-void ASTDumper::dumpBareType(QualType T, bool Desugar) {
- ColorScope Color(*this, TypeColor);
-
- SplitQualType T_split = T.split();
- OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
-
- if (Desugar && !T.isNull()) {
- // If the type is sugared, also dump a (shallow) desugared type.
- SplitQualType D_split = T.getSplitDesugaredType();
- if (T_split != D_split)
- OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
- }
-}
-
-void ASTDumper::dumpType(QualType T) {
- OS << ' ';
- dumpBareType(T);
-}
-
void ASTDumper::dumpTypeAsChild(QualType T) {
SplitQualType SQT = T.split();
if (!SQT.Quals.hasQualifiers())
return dumpTypeAsChild(SQT.Ty);
dumpChild([=] {
- OS << "QualType";
- dumpPointer(T.getAsOpaquePtr());
- OS << " ";
- dumpBareType(T, false);
- OS << " " << T.split().Quals.getAsString();
+ NodeDumper.Visit(T);
dumpTypeAsChild(T.split().Ty);
});
}
void ASTDumper::dumpTypeAsChild(const Type *T) {
dumpChild([=] {
- if (!T) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
- if (const LocInfoType *LIT = llvm::dyn_cast<LocInfoType>(T)) {
- {
- ColorScope Color(*this, TypeColor);
- OS << "LocInfo Type";
- }
- dumpPointer(T);
- dumpTypeAsChild(LIT->getTypeSourceInfo()->getType());
+ NodeDumper.Visit(T);
+ if (!T)
return;
- }
-
- {
- ColorScope Color(*this, TypeColor);
- OS << T->getTypeClassName() << "Type";
- }
- dumpPointer(T);
- OS << " ";
- dumpBareType(QualType(T, 0), false);
+ TypeVisitor<ASTDumper>::Visit(T);
QualType SingleStepDesugar =
T->getLocallyUnqualifiedSingleStepDesugaredType();
if (SingleStepDesugar != QualType(T, 0))
- OS << " sugar";
- if (T->isDependentType())
- OS << " dependent";
- else if (T->isInstantiationDependentType())
- OS << " instantiation_dependent";
- if (T->isVariablyModifiedType())
- OS << " variably_modified";
- if (T->containsUnexpandedParameterPack())
- OS << " contains_unexpanded_pack";
- if (T->isFromAST())
- OS << " imported";
-
- TypeVisitor<ASTDumper>::Visit(T);
-
- if (SingleStepDesugar != QualType(T, 0))
dumpTypeAsChild(SingleStepDesugar);
});
}
-void ASTDumper::dumpBareDeclRef(const Decl *D) {
- if (!D) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
- return;
- }
-
- {
- ColorScope Color(*this, DeclKindNameColor);
- OS << D->getDeclKindName();
- }
- dumpPointer(D);
-
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- ColorScope Color(*this, DeclNameColor);
- OS << " '" << ND->getDeclName() << '\'';
- }
-
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
- dumpType(VD->getType());
-}
-
-void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
- if (!D)
- return;
-
- dumpChild([=]{
- if (Label)
- OS << Label << ' ';
- dumpBareDeclRef(D);
- });
-}
-
-void ASTDumper::dumpName(const NamedDecl *ND) {
- if (ND->getDeclName()) {
- ColorScope Color(*this, DeclNameColor);
- OS << ' ' << ND->getNameAsString();
- }
-}
-
-bool ASTDumper::hasNodes(const DeclContext *DC) {
- if (!DC)
- return false;
-
- return DC->hasExternalLexicalStorage() ||
- (Deserialize ? DC->decls_begin() != DC->decls_end()
- : DC->noload_decls_begin() != DC->noload_decls_end());
-}
-
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
if (!DC)
return;
@@ -791,8 +364,8 @@ void ASTDumper::dumpDeclContext(const DeclContext *DC) {
dumpDecl(D);
if (DC->hasExternalLexicalStorage()) {
- dumpChild([=]{
- ColorScope Color(*this, UndeserializedColor);
+ dumpChild([=] {
+ ColorScope Color(OS, ShowColors, UndeserializedColor);
OS << "<undeserialized declarations>";
});
}
@@ -801,12 +374,12 @@ void ASTDumper::dumpDeclContext(const DeclContext *DC) {
void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
dumpChild([=] {
OS << "StoredDeclsMap ";
- dumpBareDeclRef(cast<Decl>(DC));
+ NodeDumper.dumpBareDeclRef(cast<Decl>(DC));
const DeclContext *Primary = DC->getPrimaryContext();
if (Primary != DC) {
OS << " primary";
- dumpPointer(cast<Decl>(Primary));
+ NodeDumper.dumpPointer(cast<Decl>(Primary));
}
bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
@@ -821,14 +394,14 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
dumpChild([=] {
OS << "DeclarationName ";
{
- ColorScope Color(*this, DeclNameColor);
+ ColorScope Color(OS, ShowColors, DeclNameColor);
OS << '\'' << Name << '\'';
}
for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
RI != RE; ++RI) {
dumpChild([=] {
- dumpBareDeclRef(*RI);
+ NodeDumper.dumpBareDeclRef(*RI);
if ((*RI)->isHidden())
OS << " hidden";
@@ -850,7 +423,7 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
if (HasUndeserializedLookups) {
dumpChild([=] {
- ColorScope Color(*this, UndeserializedColor);
+ ColorScope Color(OS, ShowColors, UndeserializedColor);
OS << "<undeserialized lookups>";
});
}
@@ -859,87 +432,18 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
void ASTDumper::dumpAttr(const Attr *A) {
dumpChild([=] {
- {
- ColorScope Color(*this, AttrColor);
-
- switch (A->getKind()) {
-#define ATTR(X) case attr::X: OS << #X; break;
-#include "clang/Basic/AttrList.inc"
- }
- OS << "Attr";
- }
- dumpPointer(A);
- dumpSourceRange(A->getRange());
- if (A->isInherited())
- OS << " Inherited";
- if (A->isImplicit())
- OS << " Implicit";
-#include "clang/AST/AttrDump.inc"
+ NodeDumper.Visit(A);
+ ConstAttrVisitor<ASTDumper>::Visit(A);
});
}
-static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
-
-template<typename T>
-static void dumpPreviousDeclImpl(raw_ostream &OS, const Mergeable<T> *D) {
- const T *First = D->getFirstDecl();
- if (First != D)
- OS << " first " << First;
-}
-
-template<typename T>
-static void dumpPreviousDeclImpl(raw_ostream &OS, const Redeclarable<T> *D) {
- const T *Prev = D->getPreviousDecl();
- if (Prev)
- OS << " prev " << Prev;
-}
-
-/// Dump the previous declaration in the redeclaration chain for a declaration,
-/// if any.
-static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
- switch (D->getKind()) {
-#define DECL(DERIVED, BASE) \
- case Decl::DERIVED: \
- return dumpPreviousDeclImpl(OS, cast<DERIVED##Decl>(D));
-#define ABSTRACT_DECL(DECL)
-#include "clang/AST/DeclNodes.inc"
- }
- llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
-}
-
//===----------------------------------------------------------------------===//
// C++ Utilities
//===----------------------------------------------------------------------===//
-void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
- switch (AS) {
- case AS_none:
- break;
- case AS_public:
- OS << "public";
- break;
- case AS_protected:
- OS << "protected";
- break;
- case AS_private:
- OS << "private";
- break;
- }
-}
-
void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
dumpChild([=] {
- OS << "CXXCtorInitializer";
- if (Init->isAnyMemberInitializer()) {
- OS << ' ';
- dumpBareDeclRef(Init->getAnyMember());
- } else if (Init->isBaseInitializer()) {
- dumpType(QualType(Init->getBaseClass(), 0));
- } else if (Init->isDelegatingInitializer()) {
- dumpType(Init->getTypeSourceInfo()->getType());
- } else {
- llvm_unreachable("Unknown initializer type");
- }
+ NodeDumper.Visit(Init);
dumpStmt(Init->getInit());
});
}
@@ -959,8 +463,9 @@ void ASTDumper::dumpTemplateArgumentListInfo(
dumpTemplateArgumentLoc(TALI[i]);
}
-void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
- dumpTemplateArgument(A.getArgument(), A.getSourceRange());
+void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A,
+ const Decl *From, const char *Label) {
+ dumpTemplateArgument(A.getArgument(), A.getSourceRange(), From, Label);
}
void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
@@ -968,49 +473,11 @@ void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
dumpTemplateArgument(TAL[i]);
}
-void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
+void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R,
+ const Decl *From, const char *Label) {
dumpChild([=] {
- OS << "TemplateArgument";
- if (R.isValid())
- dumpSourceRange(R);
-
- switch (A.getKind()) {
- case TemplateArgument::Null:
- OS << " null";
- break;
- case TemplateArgument::Type:
- OS << " type";
- dumpType(A.getAsType());
- break;
- case TemplateArgument::Declaration:
- OS << " decl";
- dumpDeclRef(A.getAsDecl());
- break;
- case TemplateArgument::NullPtr:
- OS << " nullptr";
- break;
- case TemplateArgument::Integral:
- OS << " integral " << A.getAsIntegral();
- break;
- case TemplateArgument::Template:
- OS << " template ";
- A.getAsTemplate().dump(OS);
- break;
- case TemplateArgument::TemplateExpansion:
- OS << " template expansion";
- A.getAsTemplateOrTemplatePattern().dump(OS);
- break;
- case TemplateArgument::Expression:
- OS << " expr";
- dumpStmt(A.getAsExpr());
- break;
- case TemplateArgument::Pack:
- OS << " pack";
- for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
- I != E; ++I)
- dumpTemplateArgument(*I);
- break;
- }
+ NodeDumper.Visit(A, R, From, Label);
+ ConstTemplateArgumentVisitor<ASTDumper>::Visit(A);
});
}
@@ -1032,46 +499,9 @@ void ASTDumper::dumpObjCTypeParamList(const ObjCTypeParamList *typeParams) {
void ASTDumper::dumpDecl(const Decl *D) {
dumpChild([=] {
- if (!D) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
+ NodeDumper.Visit(D);
+ if (!D)
return;
- }
-
- {
- ColorScope Color(*this, DeclKindNameColor);
- OS << D->getDeclKindName() << "Decl";
- }
- dumpPointer(D);
- if (D->getLexicalDeclContext() != D->getDeclContext())
- OS << " parent " << cast<Decl>(D->getDeclContext());
- dumpPreviousDecl(OS, D);
- dumpSourceRange(D->getSourceRange());
- OS << ' ';
- dumpLocation(D->getLocation());
- if (D->isFromASTFile())
- OS << " imported";
- if (Module *M = D->getOwningModule())
- OS << " in " << M->getFullModuleName();
- if (auto *ND = dyn_cast<NamedDecl>(D))
- for (Module *M : D->getASTContext().getModulesWithMergedDefinition(
- const_cast<NamedDecl *>(ND)))
- dumpChild([=] { OS << "also in " << M->getFullModuleName(); });
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- if (ND->isHidden())
- OS << " hidden";
- if (D->isImplicit())
- OS << " implicit";
- if (D->isUsed())
- OS << " used";
- else if (D->isThisDeclarationReferenced())
- OS << " referenced";
- if (D->isInvalidDecl())
- OS << " invalid";
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isConstexpr())
- OS << " constexpr";
-
ConstDeclVisitor<ASTDumper>::Visit(D);
@@ -1081,22 +511,25 @@ void ASTDumper::dumpDecl(const Decl *D) {
if (const FullComment *Comment =
D->getASTContext().getLocalCommentForDeclUncached(D))
- dumpFullComment(Comment);
+ dumpComment(Comment, Comment);
// Decls within functions are visited by the body.
- if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
- hasNodes(dyn_cast<DeclContext>(D)))
- dumpDeclContext(cast<DeclContext>(D));
+ if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D)) {
+ auto DC = dyn_cast<DeclContext>(D);
+ if (DC &&
+ (DC->hasExternalLexicalStorage() ||
+ (Deserialize ? DC->decls_begin() != DC->decls_end()
+ : DC->noload_decls_begin() != DC->noload_decls_end())))
+ dumpDeclContext(DC);
+ }
});
}
-void ASTDumper::VisitLabelDecl(const LabelDecl *D) {
- dumpName(D);
-}
+void ASTDumper::VisitLabelDecl(const LabelDecl *D) { NodeDumper.dumpName(D); }
void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) {
- dumpName(D);
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getUnderlyingType());
if (D->isModulePrivate())
OS << " __module_private__";
dumpTypeAsChild(D->getUnderlyingType());
@@ -1109,16 +542,16 @@ void ASTDumper::VisitEnumDecl(const EnumDecl *D) {
else
OS << " struct";
}
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isModulePrivate())
OS << " __module_private__";
if (D->isFixed())
- dumpType(D->getIntegerType());
+ NodeDumper.dumpType(D->getIntegerType());
}
void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
OS << ' ' << D->getKindName();
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isModulePrivate())
OS << " __module_private__";
if (D->isCompleteDefinition())
@@ -1126,23 +559,23 @@ void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
}
void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (const Expr *Init = D->getInitExpr())
dumpStmt(Init);
}
void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
for (auto *Child : D->chain())
- dumpDeclRef(Child);
+ NodeDumper.dumpDeclRef(Child);
}
void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
@@ -1166,7 +599,7 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isTrivial())
OS << " trivial";
- if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) {
+ if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
switch (EPI.ExceptionSpec.Type) {
default: break;
@@ -1179,23 +612,7 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
}
- if (const FunctionTemplateSpecializationInfo *FTSI =
- D->getTemplateSpecializationInfo())
- dumpTemplateArgumentList(*FTSI->TemplateArguments);
-
- if (!D->param_begin() && D->getNumParams())
- dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
- else
- for (const ParmVarDecl *Parameter : D->parameters())
- dumpDecl(Parameter);
-
- if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D))
- for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
- E = C->init_end();
- I != E; ++I)
- dumpCXXCtorInitializer(*I);
-
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->size_overridden_methods() != 0) {
auto dumpOverride = [=](const CXXMethodDecl *D) {
SplitQualType T_split = D->getType().split();
@@ -1218,13 +635,26 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
}
+ if (const auto *FTSI = D->getTemplateSpecializationInfo())
+ dumpTemplateArgumentList(*FTSI->TemplateArguments);
+
+ if (!D->param_begin() && D->getNumParams())
+ dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
+ else
+ for (const ParmVarDecl *Parameter : D->parameters())
+ dumpDecl(Parameter);
+
+ if (const auto *C = dyn_cast<CXXConstructorDecl>(D))
+ for (const auto *I : C->inits())
+ dumpCXXCtorInitializer(I);
+
if (D->doesThisDeclarationHaveABody())
dumpStmt(D->getBody());
}
void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->isMutable())
OS << " mutable";
if (D->isModulePrivate())
@@ -1237,8 +667,8 @@ void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
}
void ASTDumper::VisitVarDecl(const VarDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
@@ -1272,8 +702,8 @@ void ASTDumper::VisitDecompositionDecl(const DecompositionDecl *D) {
}
void ASTDumper::VisitBindingDecl(const BindingDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (auto *E = D->getBinding())
dumpStmt(E);
}
@@ -1320,12 +750,13 @@ void ASTDumper::VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
}
void ASTDumper::VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
OS << " combiner";
- dumpStmt(D->getCombiner());
- if (auto *Initializer = D->getInitializer()) {
+ NodeDumper.dumpPointer(D->getCombiner());
+ if (const auto *Initializer = D->getInitializer()) {
OS << " initializer";
+ NodeDumper.dumpPointer(Initializer);
switch (D->getInitializerKind()) {
case OMPDeclareReductionDecl::DirectInit:
OS << " omp_priv = ";
@@ -1336,13 +767,36 @@ void ASTDumper::VisitOMPDeclareReductionDecl(const OMPDeclareReductionDecl *D) {
case OMPDeclareReductionDecl::CallInit:
break;
}
+ }
+
+ dumpStmt(D->getCombiner());
+ if (const auto *Initializer = D->getInitializer())
dumpStmt(Initializer);
+}
+
+void ASTDumper::VisitOMPRequiresDecl(const OMPRequiresDecl *D) {
+ for (auto *C : D->clauselists()) {
+ dumpChild([=] {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> OMPClause";
+ return;
+ }
+ {
+ ColorScope Color(OS, ShowColors, AttrColor);
+ StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
+ << ClauseName.drop_front() << "Clause";
+ }
+ NodeDumper.dumpPointer(C);
+ NodeDumper.dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+ });
}
}
void ASTDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
dumpStmt(D->getInit());
}
@@ -1351,31 +805,31 @@ void ASTDumper::VisitOMPCapturedExprDecl(const OMPCapturedExprDecl *D) {
//===----------------------------------------------------------------------===//
void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->isInline())
OS << " inline";
if (!D->isOriginalNamespace())
- dumpDeclRef(D->getOriginalNamespace(), "original");
+ NodeDumper.dumpDeclRef(D->getOriginalNamespace(), "original");
}
void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
OS << ' ';
- dumpBareDeclRef(D->getNominatedNamespace());
+ NodeDumper.dumpBareDeclRef(D->getNominatedNamespace());
}
void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
- dumpName(D);
- dumpDeclRef(D->getAliasedNamespace());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getAliasedNamespace());
}
void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
- dumpName(D);
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getUnderlyingType());
dumpTypeAsChild(D->getUnderlyingType());
}
void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
}
@@ -1387,7 +841,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "DefinitionData";
}
#define FLAG(fn, name) if (D->fn()) OS << " " #name;
@@ -1415,7 +869,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "DefaultConstructor";
}
FLAG(hasDefaultConstructor, exists);
@@ -1429,7 +883,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyConstructor";
}
FLAG(hasSimpleCopyConstructor, simple);
@@ -1447,7 +901,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "MoveConstructor";
}
FLAG(hasMoveConstructor, exists);
@@ -1464,7 +918,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "CopyAssignment";
}
FLAG(hasTrivialCopyAssignment, trivial);
@@ -1478,7 +932,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "MoveAssignment";
}
FLAG(hasMoveAssignment, exists);
@@ -1492,7 +946,7 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
{
- ColorScope Color(*this, DeclKindNameColor);
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
OS << "Destructor";
}
FLAG(hasSimpleDestructor, simple);
@@ -1511,8 +965,8 @@ void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
dumpChild([=] {
if (I.isVirtual())
OS << "virtual ";
- dumpAccessSpecifier(I.getAccessSpecifier());
- dumpType(I.getType());
+ NodeDumper.dumpAccessSpecifier(I.getAccessSpecifier());
+ NodeDumper.dumpType(I.getType());
if (I.isPackExpansion())
OS << "...";
});
@@ -1524,10 +978,10 @@ void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
dumpStmt(D->getMessage());
}
-template<typename SpecializationDecl>
-void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
- bool DumpExplicitInst,
- bool DumpRefOnly) {
+template <typename SpecializationDecl>
+void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
+ bool DumpExplicitInst,
+ bool DumpRefOnly) {
bool DumpedAny = false;
for (auto *RedeclWithBadType : D->redecls()) {
// FIXME: The redecls() range sometimes has elements of a less-specific
@@ -1551,7 +1005,7 @@ void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
- dumpDeclRef(Redecl);
+ NodeDumper.dumpDeclRef(Redecl);
else
dumpDecl(Redecl);
DumpedAny = true;
@@ -1563,31 +1017,30 @@ void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
// Ensure we dump at least one decl for each specialization.
if (!DumpedAny)
- dumpDeclRef(D);
+ NodeDumper.dumpDeclRef(D);
}
-template<typename TemplateDecl>
-void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
- bool DumpExplicitInst) {
- dumpName(D);
+template <typename TemplateDecl>
+void ASTDumper::dumpTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst) {
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
for (auto *Child : D->specializations())
- VisitTemplateDeclSpecialization(Child, DumpExplicitInst,
- !D->isCanonicalDecl());
+ dumpTemplateDeclSpecialization(Child, DumpExplicitInst,
+ !D->isCanonicalDecl());
}
void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
// FIXME: We don't add a declaration of a function template specialization
// to its context when it's explicitly instantiated, so dump explicit
// instantiations when we dump the template itself.
- VisitTemplateDecl(D, true);
+ dumpTemplateDecl(D, true);
}
void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
- VisitTemplateDecl(D, false);
+ dumpTemplateDecl(D, false);
}
void ASTDumper::VisitClassTemplateSpecializationDecl(
@@ -1610,11 +1063,11 @@ void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
}
void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
- VisitTemplateDecl(D, false);
+ dumpTemplateDecl(D, false);
}
void ASTDumper::VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
}
@@ -1638,19 +1091,25 @@ void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument());
+ dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from"
+ : "previous");
}
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
- dumpType(D->getType());
+ NodeDumper.dumpType(D->getType());
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
if (D->hasDefaultArgument())
- dumpTemplateArgument(D->getDefaultArgument());
+ dumpTemplateArgument(D->getDefaultArgument(), SourceRange(),
+ D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from"
+ : "previous");
}
void ASTDumper::VisitTemplateTemplateParmDecl(
@@ -1658,10 +1117,12 @@ void ASTDumper::VisitTemplateTemplateParmDecl(
OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
- dumpName(D);
+ NodeDumper.dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
if (D->hasDefaultArgument())
- dumpTemplateArgumentLoc(D->getDefaultArgument());
+ dumpTemplateArgumentLoc(
+ D->getDefaultArgument(), D->getDefaultArgStorage().getInheritedFrom(),
+ D->defaultArgumentWasInherited() ? "inherited from" : "previous");
}
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
@@ -1684,12 +1145,12 @@ void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D)
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
- dumpType(D->getType());
+ NodeDumper.dumpType(D->getType());
}
void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
OS << ' ';
- dumpBareDeclRef(D->getTargetDecl());
+ NodeDumper.dumpBareDeclRef(D->getTargetDecl());
if (auto *TD = dyn_cast<TypeDecl>(D->getUnderlyingDecl()))
dumpTypeAsChild(TD->getTypeForDecl());
}
@@ -1701,21 +1162,21 @@ void ASTDumper::VisitConstructorUsingShadowDecl(
dumpChild([=] {
OS << "target ";
- dumpBareDeclRef(D->getTargetDecl());
+ NodeDumper.dumpBareDeclRef(D->getTargetDecl());
});
dumpChild([=] {
OS << "nominated ";
- dumpBareDeclRef(D->getNominatedBaseClass());
+ NodeDumper.dumpBareDeclRef(D->getNominatedBaseClass());
OS << ' ';
- dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
+ NodeDumper.dumpBareDeclRef(D->getNominatedBaseClassShadowDecl());
});
dumpChild([=] {
OS << "constructed ";
- dumpBareDeclRef(D->getConstructedBaseClass());
+ NodeDumper.dumpBareDeclRef(D->getConstructedBaseClass());
OS << ' ';
- dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
+ NodeDumper.dumpBareDeclRef(D->getConstructedBaseClassShadowDecl());
});
}
@@ -1728,12 +1189,12 @@ void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
OS << ' ';
- dumpAccessSpecifier(D->getAccess());
+ NodeDumper.dumpAccessSpecifier(D->getAccess());
}
void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
if (TypeSourceInfo *T = D->getFriendType())
- dumpType(T->getType());
+ NodeDumper.dumpType(T->getType());
else
dumpDecl(D->getFriendDecl());
}
@@ -1743,8 +1204,8 @@ void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
//===----------------------------------------------------------------------===//
void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->getSynthesize())
OS << " synthesize";
@@ -1772,8 +1233,8 @@ void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
OS << " -";
else
OS << " +";
- dumpName(D);
- dumpType(D->getReturnType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getReturnType());
if (D->isThisDeclarationADefinition()) {
dumpDeclContext(D);
@@ -1790,7 +1251,7 @@ void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
}
void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
switch (D->getVariance()) {
case ObjCTypeParamVariance::Invariant:
break;
@@ -1806,47 +1267,47 @@ void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
if (D->hasExplicitBound())
OS << " bounded";
- dumpType(D->getUnderlyingType());
+ NodeDumper.dumpType(D->getUnderlyingType());
}
void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
- dumpName(D);
- dumpDeclRef(D->getClassInterface());
- dumpObjCTypeParamList(D->getTypeParamList());
- dumpDeclRef(D->getImplementation());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getClassInterface());
+ NodeDumper.dumpDeclRef(D->getImplementation());
for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(),
E = D->protocol_end();
I != E; ++I)
- dumpDeclRef(*I);
+ NodeDumper.dumpDeclRef(*I);
+ dumpObjCTypeParamList(D->getTypeParamList());
}
void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
- dumpName(D);
- dumpDeclRef(D->getClassInterface());
- dumpDeclRef(D->getCategoryDecl());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getClassInterface());
+ NodeDumper.dumpDeclRef(D->getCategoryDecl());
}
void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
- dumpName(D);
+ NodeDumper.dumpName(D);
for (auto *Child : D->protocols())
- dumpDeclRef(Child);
+ NodeDumper.dumpDeclRef(Child);
}
void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
- dumpName(D);
- dumpObjCTypeParamList(D->getTypeParamListAsWritten());
- dumpDeclRef(D->getSuperClass(), "super");
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getSuperClass(), "super");
- dumpDeclRef(D->getImplementation());
+ NodeDumper.dumpDeclRef(D->getImplementation());
for (auto *Child : D->protocols())
- dumpDeclRef(Child);
+ NodeDumper.dumpDeclRef(Child);
+ dumpObjCTypeParamList(D->getTypeParamListAsWritten());
}
void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
- dumpName(D);
- dumpDeclRef(D->getSuperClass(), "super");
- dumpDeclRef(D->getClassInterface());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getSuperClass(), "super");
+ NodeDumper.dumpDeclRef(D->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
E = D->init_end();
I != E; ++I)
@@ -1854,13 +1315,13 @@ void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
}
void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
- dumpName(D);
- dumpDeclRef(D->getClassInterface());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpDeclRef(D->getClassInterface());
}
void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
- dumpName(D);
- dumpType(D->getType());
+ NodeDumper.dumpName(D);
+ NodeDumper.dumpType(D->getType());
if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
OS << " required";
@@ -1892,20 +1353,28 @@ void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
if (Attrs & ObjCPropertyDecl::OBJC_PR_class)
OS << " class";
if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
- dumpDeclRef(D->getGetterMethodDecl(), "getter");
+ NodeDumper.dumpDeclRef(D->getGetterMethodDecl(), "getter");
if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
- dumpDeclRef(D->getSetterMethodDecl(), "setter");
+ NodeDumper.dumpDeclRef(D->getSetterMethodDecl(), "setter");
}
}
void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
- dumpName(D->getPropertyDecl());
+ NodeDumper.dumpName(D->getPropertyDecl());
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
OS << " synthesize";
else
OS << " dynamic";
- dumpDeclRef(D->getPropertyDecl());
- dumpDeclRef(D->getPropertyIvarDecl());
+ NodeDumper.dumpDeclRef(D->getPropertyDecl());
+ NodeDumper.dumpDeclRef(D->getPropertyIvarDecl());
+}
+
+void ASTDumper::Visit(const BlockDecl::Capture &C) {
+ dumpChild([=] {
+ NodeDumper.Visit(C);
+ if (C.hasCopyExpr())
+ dumpStmt(C.getCopyExpr());
+ });
}
void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
@@ -1918,21 +1387,8 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
if (D->capturesCXXThis())
dumpChild([=]{ OS << "capture this"; });
- for (const auto &I : D->captures()) {
- dumpChild([=] {
- OS << "capture";
- if (I.isByRef())
- OS << " byref";
- if (I.isNested())
- OS << " nested";
- if (I.getVariable()) {
- OS << ' ';
- dumpBareDeclRef(I.getVariable());
- }
- if (I.hasCopyExpr())
- dumpStmt(I.getCopyExpr());
- });
- }
+ for (const auto &I : D->captures())
+ Visit(I);
dumpStmt(D->getBody());
}
@@ -1940,42 +1396,27 @@ void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
// Stmt dumping methods.
//===----------------------------------------------------------------------===//
-void ASTDumper::dumpStmt(const Stmt *S) {
- dumpChild([=] {
+void ASTDumper::dumpStmt(const Stmt *S, StringRef Label) {
+ dumpChild(Label, [=] {
+ NodeDumper.Visit(S);
+
if (!S) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
return;
}
+ ConstStmtVisitor<ASTDumper>::Visit(S);
+
// Some statements have custom mechanisms for dumping their children.
- if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
- VisitDeclStmt(DS);
- return;
- }
- if (const GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(S)) {
- VisitGenericSelectionExpr(GSE);
+ if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S)) {
return;
}
- ConstStmtVisitor<ASTDumper>::Visit(S);
-
for (const Stmt *SubStmt : S->children())
dumpStmt(SubStmt);
});
}
-void ASTDumper::VisitStmt(const Stmt *Node) {
- {
- ColorScope Color(*this, StmtColor);
- OS << Node->getStmtClassName();
- }
- dumpPointer(Node);
- dumpSourceRange(Node->getSourceRange());
-}
-
void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
- VisitStmt(Node);
for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
E = Node->decl_end();
I != E; ++I)
@@ -1983,31 +1424,17 @@ void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
}
void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
- VisitStmt(Node);
for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
E = Node->getAttrs().end();
I != E; ++I)
dumpAttr(*I);
}
-void ASTDumper::VisitLabelStmt(const LabelStmt *Node) {
- VisitStmt(Node);
- OS << " '" << Node->getName() << "'";
-}
-
-void ASTDumper::VisitGotoStmt(const GotoStmt *Node) {
- VisitStmt(Node);
- OS << " '" << Node->getLabel()->getName() << "'";
- dumpPointer(Node->getLabel());
-}
-
void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) {
- VisitStmt(Node);
dumpDecl(Node->getExceptionDecl());
}
void ASTDumper::VisitCapturedStmt(const CapturedStmt *Node) {
- VisitStmt(Node);
dumpDecl(Node->getCapturedDecl());
}
@@ -2015,287 +1442,41 @@ void ASTDumper::VisitCapturedStmt(const CapturedStmt *Node) {
// OpenMP dumping methods.
//===----------------------------------------------------------------------===//
+void ASTDumper::Visit(const OMPClause *C) {
+ dumpChild([=] {
+ NodeDumper.Visit(C);
+ for (auto *S : C->children())
+ dumpStmt(S);
+ });
+}
+
void ASTDumper::VisitOMPExecutableDirective(
const OMPExecutableDirective *Node) {
- VisitStmt(Node);
- for (auto *C : Node->clauses()) {
- dumpChild([=] {
- if (!C) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>> OMPClause";
- return;
- }
- {
- ColorScope Color(*this, AttrColor);
- StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
- OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
- << ClauseName.drop_front() << "Clause";
- }
- dumpPointer(C);
- dumpSourceRange(SourceRange(C->getLocStart(), C->getLocEnd()));
- if (C->isImplicit())
- OS << " <implicit>";
- for (auto *S : C->children())
- dumpStmt(S);
- });
- }
+ for (const auto *C : Node->clauses())
+ Visit(C);
}
//===----------------------------------------------------------------------===//
// Expr dumping methods.
//===----------------------------------------------------------------------===//
-void ASTDumper::VisitExpr(const Expr *Node) {
- VisitStmt(Node);
- dumpType(Node->getType());
-
- {
- ColorScope Color(*this, ValueKindColor);
- switch (Node->getValueKind()) {
- case VK_RValue:
- break;
- case VK_LValue:
- OS << " lvalue";
- break;
- case VK_XValue:
- OS << " xvalue";
- break;
- }
- }
-
- {
- ColorScope Color(*this, ObjectKindColor);
- switch (Node->getObjectKind()) {
- case OK_Ordinary:
- break;
- case OK_BitField:
- OS << " bitfield";
- break;
- case OK_ObjCProperty:
- OS << " objcproperty";
- break;
- case OK_ObjCSubscript:
- OS << " objcsubscript";
- break;
- case OK_VectorComponent:
- OS << " vectorcomponent";
- break;
- }
- }
-}
-
-static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
- if (Node->path_empty())
- return;
-
- OS << " (";
- bool First = true;
- for (CastExpr::path_const_iterator I = Node->path_begin(),
- E = Node->path_end();
- I != E; ++I) {
- const CXXBaseSpecifier *Base = *I;
- if (!First)
- OS << " -> ";
-
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
-
- if (Base->isVirtual())
- OS << "virtual ";
- OS << RD->getName();
- First = false;
- }
-
- OS << ')';
-}
-
-void ASTDumper::VisitCastExpr(const CastExpr *Node) {
- VisitExpr(Node);
- OS << " <";
- {
- ColorScope Color(*this, CastColor);
- OS << Node->getCastKindName();
- }
- dumpBasePath(OS, Node);
- OS << ">";
-}
-
-void ASTDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
- VisitCastExpr(Node);
- if (Node->isPartOfExplicitCast())
- OS << " part_of_explicit_cast";
-}
-
-void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
- VisitExpr(Node);
-
- OS << " ";
- dumpBareDeclRef(Node->getDecl());
- if (Node->getDecl() != Node->getFoundDecl()) {
- OS << " (";
- dumpBareDeclRef(Node->getFoundDecl());
- OS << ")";
- }
-}
-
-void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) {
- VisitExpr(Node);
- OS << " (";
- if (!Node->requiresADL())
- OS << "no ";
- OS << "ADL) = '" << Node->getName() << '\'';
-
- UnresolvedLookupExpr::decls_iterator
- I = Node->decls_begin(), E = Node->decls_end();
- if (I == E)
- OS << " empty";
- for (; I != E; ++I)
- dumpPointer(*I);
-}
-
-void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
- VisitExpr(Node);
-
- {
- ColorScope Color(*this, DeclKindNameColor);
- OS << " " << Node->getDecl()->getDeclKindName() << "Decl";
- }
- OS << "='" << *Node->getDecl() << "'";
- dumpPointer(Node->getDecl());
- if (Node->isFreeIvar())
- OS << " isFreeIvar";
-}
-
-void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
- VisitExpr(Node);
- OS << " " << PredefinedExpr::getIdentTypeName(Node->getIdentType());
-}
-
-void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
- VisitExpr(Node);
- ColorScope Color(*this, ValueColor);
- OS << " " << Node->getValue();
-}
-
-void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
- VisitExpr(Node);
-
- bool isSigned = Node->getType()->isSignedIntegerType();
- ColorScope Color(*this, ValueColor);
- OS << " " << Node->getValue().toString(10, isSigned);
-}
-
-void ASTDumper::VisitFixedPointLiteral(const FixedPointLiteral *Node) {
- VisitExpr(Node);
-
- ColorScope Color(*this, ValueColor);
- OS << " " << Node->getValueAsString(/*Radix=*/10);
-}
-
-void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
- VisitExpr(Node);
- ColorScope Color(*this, ValueColor);
- OS << " " << Node->getValueAsApproximateDouble();
-}
-
-void ASTDumper::VisitStringLiteral(const StringLiteral *Str) {
- VisitExpr(Str);
- ColorScope Color(*this, ValueColor);
- OS << " ";
- Str->outputString(OS);
-}
void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
- VisitExpr(ILE);
if (auto *Filler = ILE->getArrayFiller()) {
- dumpChild([=] {
- OS << "array filler";
- dumpStmt(Filler);
- });
- }
- if (auto *Field = ILE->getInitializedFieldInUnion()) {
- OS << " field ";
- dumpBareDeclRef(Field);
- }
-}
-
-void ASTDumper::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
- VisitExpr(E);
-}
-
-void ASTDumper::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
- VisitExpr(E);
-}
-
-void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
- VisitExpr(Node);
- OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
- << " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
- if (!Node->canOverflow())
- OS << " cannot overflow";
-}
-
-void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
- const UnaryExprOrTypeTraitExpr *Node) {
- VisitExpr(Node);
- switch(Node->getKind()) {
- case UETT_SizeOf:
- OS << " sizeof";
- break;
- case UETT_AlignOf:
- OS << " alignof";
- break;
- case UETT_VecStep:
- OS << " vec_step";
- break;
- case UETT_OpenMPRequiredSimdAlign:
- OS << " __builtin_omp_required_simd_align";
- break;
+ dumpStmt(Filler, "array_filler");
}
- if (Node->isArgumentType())
- dumpType(Node->getArgumentType());
-}
-
-void ASTDumper::VisitMemberExpr(const MemberExpr *Node) {
- VisitExpr(Node);
- OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
- dumpPointer(Node->getMemberDecl());
-}
-
-void ASTDumper::VisitExtVectorElementExpr(const ExtVectorElementExpr *Node) {
- VisitExpr(Node);
- OS << " " << Node->getAccessor().getNameStart();
-}
-
-void ASTDumper::VisitBinaryOperator(const BinaryOperator *Node) {
- VisitExpr(Node);
- OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
-}
-
-void ASTDumper::VisitCompoundAssignOperator(
- const CompoundAssignOperator *Node) {
- VisitExpr(Node);
- OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
- << "' ComputeLHSTy=";
- dumpBareType(Node->getComputationLHSType());
- OS << " ComputeResultTy=";
- dumpBareType(Node->getComputationResultType());
}
void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
- VisitExpr(Node);
dumpDecl(Node->getBlockDecl());
}
void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
- VisitExpr(Node);
-
if (Expr *Source = Node->getSourceExpr())
dumpStmt(Source);
}
void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- VisitExpr(E);
if (E->isResultDependent())
OS << " result_dependent";
dumpStmt(E->getControllingExpr());
@@ -2305,7 +1486,7 @@ void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
dumpChild([=] {
if (const TypeSourceInfo *TSI = E->getAssocTypeSourceInfo(I)) {
OS << "case ";
- dumpType(TSI->getType());
+ NodeDumper.dumpType(TSI->getType());
} else {
OS << "default";
}
@@ -2320,394 +1501,42 @@ void ASTDumper::VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
}
}
-// GNU extensions.
-
-void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
- VisitExpr(Node);
- OS << " " << Node->getLabel()->getName();
- dumpPointer(Node->getLabel());
-}
-
//===----------------------------------------------------------------------===//
// C++ Expressions
//===----------------------------------------------------------------------===//
-void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
- VisitExpr(Node);
- OS << " " << Node->getCastName()
- << "<" << Node->getTypeAsWritten().getAsString() << ">"
- << " <" << Node->getCastKindName();
- dumpBasePath(OS, Node);
- OS << ">";
-}
-
-void ASTDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
- VisitExpr(Node);
- OS << " " << (Node->getValue() ? "true" : "false");
-}
-
-void ASTDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
- VisitExpr(Node);
- OS << " this";
-}
-
-void ASTDumper::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node) {
- VisitExpr(Node);
- OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
- << " <" << Node->getCastKindName() << ">";
-}
-
-void ASTDumper::VisitCXXUnresolvedConstructExpr(
- const CXXUnresolvedConstructExpr *Node) {
- VisitExpr(Node);
- dumpType(Node->getTypeAsWritten());
- if (Node->isListInitialization())
- OS << " list";
-}
-
-void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
- VisitExpr(Node);
- CXXConstructorDecl *Ctor = Node->getConstructor();
- dumpType(Ctor->getType());
- if (Node->isElidable())
- OS << " elidable";
- if (Node->isListInitialization())
- OS << " list";
- if (Node->isStdInitListInitialization())
- OS << " std::initializer_list";
- if (Node->requiresZeroInitialization())
- OS << " zeroing";
-}
-
-void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) {
- VisitExpr(Node);
- OS << " ";
- dumpCXXTemporary(Node->getTemporary());
-}
-
-void ASTDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
- VisitExpr(Node);
- if (Node->isGlobalNew())
- OS << " global";
- if (Node->isArray())
- OS << " array";
- if (Node->getOperatorNew()) {
- OS << ' ';
- dumpBareDeclRef(Node->getOperatorNew());
- }
- // We could dump the deallocation function used in case of error, but it's
- // usually not that interesting.
-}
-
-void ASTDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
- VisitExpr(Node);
- if (Node->isGlobalDelete())
- OS << " global";
- if (Node->isArrayForm())
- OS << " array";
- if (Node->getOperatorDelete()) {
- OS << ' ';
- dumpBareDeclRef(Node->getOperatorDelete());
- }
-}
-
-void
-ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) {
- VisitExpr(Node);
- if (const ValueDecl *VD = Node->getExtendingDecl()) {
- OS << " extended by ";
- dumpBareDeclRef(VD);
- }
-}
-
-void ASTDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
- VisitExpr(Node);
- for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
- dumpDeclRef(Node->getObject(i), "cleanup");
-}
-
-void ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
- OS << "(CXXTemporary";
- dumpPointer(Temporary);
- OS << ")";
-}
-
void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
- VisitExpr(Node);
- dumpPointer(Node->getPack());
- dumpName(Node->getPack());
if (Node->isPartiallySubstituted())
for (const auto &A : Node->getPartialArguments())
dumpTemplateArgument(A);
}
-void ASTDumper::VisitCXXDependentScopeMemberExpr(
- const CXXDependentScopeMemberExpr *Node) {
- VisitExpr(Node);
- OS << " " << (Node->isArrow() ? "->" : ".") << Node->getMember();
-}
-
//===----------------------------------------------------------------------===//
// Obj-C Expressions
//===----------------------------------------------------------------------===//
-void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
- VisitExpr(Node);
- OS << " selector=";
- Node->getSelector().print(OS);
- switch (Node->getReceiverKind()) {
- case ObjCMessageExpr::Instance:
- break;
-
- case ObjCMessageExpr::Class:
- OS << " class=";
- dumpBareType(Node->getClassReceiver());
- break;
-
- case ObjCMessageExpr::SuperInstance:
- OS << " super (instance)";
- break;
-
- case ObjCMessageExpr::SuperClass:
- OS << " super (class)";
- break;
- }
-}
-
-void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
- VisitExpr(Node);
- if (auto *BoxingMethod = Node->getBoxingMethod()) {
- OS << " selector=";
- BoxingMethod->getSelector().print(OS);
- }
-}
-
void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
- VisitStmt(Node);
if (const VarDecl *CatchParam = Node->getCatchParamDecl())
dumpDecl(CatchParam);
- else
- OS << " catch all";
-}
-
-void ASTDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) {
- VisitExpr(Node);
- dumpType(Node->getEncodedType());
-}
-
-void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
- VisitExpr(Node);
-
- OS << " ";
- Node->getSelector().print(OS);
-}
-
-void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
- VisitExpr(Node);
-
- OS << ' ' << *Node->getProtocol();
-}
-
-void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
- VisitExpr(Node);
- if (Node->isImplicitProperty()) {
- OS << " Kind=MethodRef Getter=\"";
- if (Node->getImplicitPropertyGetter())
- Node->getImplicitPropertyGetter()->getSelector().print(OS);
- else
- OS << "(null)";
-
- OS << "\" Setter=\"";
- if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
- Setter->getSelector().print(OS);
- else
- OS << "(null)";
- OS << "\"";
- } else {
- OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"';
- }
-
- if (Node->isSuperReceiver())
- OS << " super";
-
- OS << " Messaging=";
- if (Node->isMessagingGetter() && Node->isMessagingSetter())
- OS << "Getter&Setter";
- else if (Node->isMessagingGetter())
- OS << "Getter";
- else if (Node->isMessagingSetter())
- OS << "Setter";
-}
-
-void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) {
- VisitExpr(Node);
- if (Node->isArraySubscriptRefExpr())
- OS << " Kind=ArraySubscript GetterForArray=\"";
- else
- OS << " Kind=DictionarySubscript GetterForDictionary=\"";
- if (Node->getAtIndexMethodDecl())
- Node->getAtIndexMethodDecl()->getSelector().print(OS);
- else
- OS << "(null)";
-
- if (Node->isArraySubscriptRefExpr())
- OS << "\" SetterForArray=\"";
- else
- OS << "\" SetterForDictionary=\"";
- if (Node->setAtIndexMethodDecl())
- Node->setAtIndexMethodDecl()->getSelector().print(OS);
- else
- OS << "(null)";
-}
-
-void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
- VisitExpr(Node);
- OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
//===----------------------------------------------------------------------===//
// Comments
//===----------------------------------------------------------------------===//
-const char *ASTDumper::getCommandName(unsigned CommandID) {
- if (Traits)
- return Traits->getCommandInfo(CommandID)->Name;
- const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID);
- if (Info)
- return Info->Name;
- return "<not a builtin command>";
-}
-
-void ASTDumper::dumpFullComment(const FullComment *C) {
- if (!C)
- return;
-
- FC = C;
- dumpComment(C);
- FC = nullptr;
-}
-
-void ASTDumper::dumpComment(const Comment *C) {
+void ASTDumper::dumpComment(const Comment *C, const FullComment *FC) {
dumpChild([=] {
+ NodeDumper.Visit(C, FC);
if (!C) {
- ColorScope Color(*this, NullColor);
- OS << "<<<NULL>>>";
return;
}
-
- {
- ColorScope Color(*this, CommentColor);
- OS << C->getCommentKindName();
- }
- dumpPointer(C);
- dumpSourceRange(C->getSourceRange());
- ConstCommentVisitor<ASTDumper>::visit(C);
+ ConstCommentVisitor<ASTDumper, void, const FullComment *>::visit(C, FC);
for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
I != E; ++I)
- dumpComment(*I);
+ dumpComment(*I, FC);
});
}
-void ASTDumper::visitTextComment(const TextComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
-void ASTDumper::visitInlineCommandComment(const InlineCommandComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
- switch (C->getRenderKind()) {
- case InlineCommandComment::RenderNormal:
- OS << " RenderNormal";
- break;
- case InlineCommandComment::RenderBold:
- OS << " RenderBold";
- break;
- case InlineCommandComment::RenderMonospaced:
- OS << " RenderMonospaced";
- break;
- case InlineCommandComment::RenderEmphasized:
- OS << " RenderEmphasized";
- break;
- }
-
- for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
- OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
-}
-
-void ASTDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) {
- OS << " Name=\"" << C->getTagName() << "\"";
- if (C->getNumAttrs() != 0) {
- OS << " Attrs: ";
- for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
- const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
- OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
- }
- }
- if (C->isSelfClosing())
- OS << " SelfClosing";
-}
-
-void ASTDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
- OS << " Name=\"" << C->getTagName() << "\"";
-}
-
-void ASTDumper::visitBlockCommandComment(const BlockCommandComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
- for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
- OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
-}
-
-void ASTDumper::visitParamCommandComment(const ParamCommandComment *C) {
- OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection());
-
- if (C->isDirectionExplicit())
- OS << " explicitly";
- else
- OS << " implicitly";
-
- if (C->hasParamName()) {
- if (C->isParamIndexValid())
- OS << " Param=\"" << C->getParamName(FC) << "\"";
- else
- OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
- }
-
- if (C->isParamIndexValid() && !C->isVarArgParam())
- OS << " ParamIndex=" << C->getParamIndex();
-}
-
-void ASTDumper::visitTParamCommandComment(const TParamCommandComment *C) {
- if (C->hasParamName()) {
- if (C->isPositionValid())
- OS << " Param=\"" << C->getParamName(FC) << "\"";
- else
- OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
- }
-
- if (C->isPositionValid()) {
- OS << " Position=<";
- for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
- OS << C->getIndex(i);
- if (i != e - 1)
- OS << ", ";
- }
- OS << ">";
- }
-}
-
-void ASTDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
- OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""
- " CloseName=\"" << C->getCloseName() << "\"";
-}
-
-void ASTDumper::visitVerbatimBlockLineComment(
- const VerbatimBlockLineComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
-void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
- OS << " Text=\"" << C->getText() << "\"";
-}
-
//===----------------------------------------------------------------------===//
// Type method implementations
//===----------------------------------------------------------------------===//
@@ -2816,12 +1645,16 @@ LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM) const {
const FullComment *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
ASTDumper D(OS, Traits, SM);
- D.dumpFullComment(FC);
+ D.dumpComment(FC, FC);
}
LLVM_DUMP_METHOD void Comment::dumpColor() const {
const FullComment *FC = dyn_cast<FullComment>(this);
+ if (!FC)
+ return;
ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
- D.dumpFullComment(FC);
+ D.dumpComment(FC, FC);
}
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index b360b3910636..44832557e97b 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTImporter.h"
+#include "clang/AST/ASTImporterLookupTable.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTStructuralEquivalence.h"
@@ -71,10 +72,43 @@
namespace clang {
+ using llvm::make_error;
+ using llvm::Error;
+ using llvm::Expected;
+ using ExpectedType = llvm::Expected<QualType>;
+ using ExpectedStmt = llvm::Expected<Stmt *>;
+ using ExpectedExpr = llvm::Expected<Expr *>;
+ using ExpectedDecl = llvm::Expected<Decl *>;
+ using ExpectedSLoc = llvm::Expected<SourceLocation>;
+
+ std::string ImportError::toString() const {
+ // FIXME: Improve error texts.
+ switch (Error) {
+ case NameConflict:
+ return "NameConflict";
+ case UnsupportedConstruct:
+ return "UnsupportedConstruct";
+ case Unknown:
+ return "Unknown error";
+ }
+ llvm_unreachable("Invalid error code.");
+ return "Invalid error code.";
+ }
+
+ void ImportError::log(raw_ostream &OS) const {
+ OS << toString();
+ }
+
+ std::error_code ImportError::convertToErrorCode() const {
+ llvm_unreachable("Function not implemented.");
+ }
+
+ char ImportError::ID;
+
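A minimal sketch (not part of the patch; names hypothetical) of how a caller could examine a failed import carried in one of these Expected aliases, assuming ImportError derives from llvm::ErrorInfo<ImportError> as the ID/log()/convertToErrorCode() members above imply:

    // Any of the Visit* methods below now yields an ExpectedType.
    ExpectedType ToTypeOrErr = NodeImporter.Visit(FromTy);
    if (!ToTypeOrErr) {
      // The payload is the ImportError defined above; toString() yields
      // e.g. "UnsupportedConstruct".
      llvm::handleAllErrors(ToTypeOrErr.takeError(),
                            [](const ImportError &IE) {
        llvm::errs() << "import failed: " << IE.toString() << "\n";
      });
    }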
template <class T>
- SmallVector<Decl*, 2>
+ SmallVector<Decl *, 2>
getCanonicalForwardRedeclChain(Redeclarable<T>* D) {
- SmallVector<Decl*, 2> Redecls;
+ SmallVector<Decl *, 2> Redecls;
for (auto *R : D->getFirstDecl()->redecls()) {
if (R != D->getFirstDecl())
Redecls.push_back(R);
@@ -85,9 +119,13 @@ namespace clang {
}
SmallVector<Decl*, 2> getCanonicalForwardRedeclChain(Decl* D) {
- // Currently only FunctionDecl is supported
- auto FD = cast<FunctionDecl>(D);
- return getCanonicalForwardRedeclChain<FunctionDecl>(FD);
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return getCanonicalForwardRedeclChain<FunctionDecl>(FD);
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return getCanonicalForwardRedeclChain<VarDecl>(VD);
+ if (auto *TD = dyn_cast<TagDecl>(D))
+ return getCanonicalForwardRedeclChain<TagDecl>(TD);
+ llvm_unreachable("Bad declaration kind");
}
void updateFlags(const Decl *From, Decl *To) {
@@ -97,12 +135,110 @@ namespace clang {
To->setIsUsed();
}
- class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
- public DeclVisitor<ASTNodeImporter, Decl *>,
- public StmtVisitor<ASTNodeImporter, Stmt *> {
+ // FIXME: Temporary until every import returns Expected.
+ template <>
+ LLVM_NODISCARD Error
+ ASTImporter::importInto(SourceLocation &To, const SourceLocation &From) {
+ To = Import(From);
+ if (From.isValid() && To.isInvalid())
+ return llvm::make_error<ImportError>();
+ return Error::success();
+ }
+ // FIXME: Temporary until every import returns Expected.
+ template <>
+ LLVM_NODISCARD Error
+ ASTImporter::importInto(QualType &To, const QualType &From) {
+ To = Import(From);
+ if (!From.isNull() && To.isNull())
+ return llvm::make_error<ImportError>();
+ return Error::success();
+ }
+
+ class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>,
+ public DeclVisitor<ASTNodeImporter, ExpectedDecl>,
+ public StmtVisitor<ASTNodeImporter, ExpectedStmt> {
ASTImporter &Importer;
- // Wrapper for an overload set.
+ // Use this instead of Importer.importInto.
+ template <typename ImportT>
+ LLVM_NODISCARD Error importInto(ImportT &To, const ImportT &From) {
+ return Importer.importInto(To, From);
+ }
+
+ // Use this to import pointers of a specific type.
+ template <typename ImportT>
+ LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) {
+ auto ToI = Importer.Import(From);
+ if (!ToI && From)
+ return make_error<ImportError>();
+ To = cast_or_null<ImportT>(ToI);
+ return Error::success();
+ // FIXME: This should be the final code.
+ //auto ToOrErr = Importer.Import(From);
+ //if (ToOrErr) {
+ // To = cast_or_null<ImportT>(*ToOrErr);
+ //}
+ //return ToOrErr.takeError();
+ }
+
+ // Call the import function of ASTImporter for a base class of type `T` and
+ // cast the return value to `T`.
+ template <typename T>
+ Expected<T *> import(T *From) {
+ auto *To = Importer.Import(From);
+ if (!To && From)
+ return make_error<ImportError>();
+ return cast_or_null<T>(To);
+ // FIXME: This should be the final code.
+ //auto ToOrErr = Importer.Import(From);
+ //if (!ToOrErr)
+ // return ToOrErr.takeError();
+ //return cast_or_null<T>(*ToOrErr);
+ }
+
+ template <typename T>
+ Expected<T *> import(const T *From) {
+ return import(const_cast<T *>(From));
+ }
+
+ // Call the import function of ASTImporter for type `T`.
+ template <typename T>
+ Expected<T> import(const T &From) {
+ T To = Importer.Import(From);
+ T DefaultT;
+ if (To == DefaultT && !(From == DefaultT))
+ return make_error<ImportError>();
+ return To;
+ // FIXME: This should be the final code.
+ //return Importer.Import(From);
+ }
+
+ template <class T>
+ Expected<std::tuple<T>>
+ importSeq(const T &From) {
+ Expected<T> ToOrErr = import(From);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ return std::make_tuple<T>(std::move(*ToOrErr));
+ }
+
+ // Import multiple objects with a single function call.
+ // This should work for every type for which a variant of `import` exists.
+ // The arguments are processed from left to right, and the import stops at the
+ // first error.
+ template <class THead, class... TTail>
+ Expected<std::tuple<THead, TTail...>>
+ importSeq(const THead &FromHead, const TTail &...FromTail) {
+ Expected<std::tuple<THead>> ToHeadOrErr = importSeq(FromHead);
+ if (!ToHeadOrErr)
+ return ToHeadOrErr.takeError();
+ Expected<std::tuple<TTail...>> ToTailOrErr = importSeq(FromTail...);
+ if (!ToTailOrErr)
+ return ToTailOrErr.takeError();
+ return std::tuple_cat(*ToHeadOrErr, *ToTailOrErr);
+ }
+
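A minimal usage sketch of the variadic importSeq above (the From* values are hypothetical; the same pattern appears verbatim in several Visit* bodies later in this patch):

    // Import several sub-objects in one call; the first failure aborts the sequence.
    SourceLocation ToLoc;
    QualType ToType;
    Expr *ToInit = nullptr;
    if (auto Imp = importSeq(FromLoc, FromType, FromInit))
      std::tie(ToLoc, ToType, ToInit) = *Imp;  // unpack the imported tuple
    else
      return Imp.takeError();                  // propagate the ImportError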
+// Wrapper for an overload set.
template <typename ToDeclT> struct CallOverloadedCreateFun {
template <typename... Args>
auto operator()(Args &&... args)
@@ -147,16 +283,23 @@ namespace clang {
LLVM_NODISCARD bool
GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun,
FromDeclT *FromD, Args &&... args) {
+ // FIXME: This code is needed later.
+ //if (Importer.getImportDeclErrorIfAny(FromD)) {
+ // ToD = nullptr;
+ // return true; // Already imported but with error.
+ //}
ToD = cast_or_null<ToDeclT>(Importer.GetAlreadyImportedOrNull(FromD));
if (ToD)
return true; // Already imported.
ToD = CreateFun(std::forward<Args>(args)...);
+ // Keep track of imported Decls.
+ Importer.MapImported(FromD, ToD);
+ Importer.AddToLookupTable(ToD);
InitializeImportedDecl(FromD, ToD);
return false; // A new Decl is created.
}
void InitializeImportedDecl(Decl *FromD, Decl *ToD) {
- Importer.MapImported(FromD, ToD);
ToD->IdentifierNamespace = FromD->IdentifierNamespace;
if (FromD->hasAttrs())
for (const Attr *FromAttr : FromD->getAttrs())
@@ -170,84 +313,82 @@ namespace clang {
public:
explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {}
- using TypeVisitor<ASTNodeImporter, QualType>::Visit;
- using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
- using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
+ using TypeVisitor<ASTNodeImporter, ExpectedType>::Visit;
+ using DeclVisitor<ASTNodeImporter, ExpectedDecl>::Visit;
+ using StmtVisitor<ASTNodeImporter, ExpectedStmt>::Visit;
// Importing types
- QualType VisitType(const Type *T);
- QualType VisitAtomicType(const AtomicType *T);
- QualType VisitBuiltinType(const BuiltinType *T);
- QualType VisitDecayedType(const DecayedType *T);
- QualType VisitComplexType(const ComplexType *T);
- QualType VisitPointerType(const PointerType *T);
- QualType VisitBlockPointerType(const BlockPointerType *T);
- QualType VisitLValueReferenceType(const LValueReferenceType *T);
- QualType VisitRValueReferenceType(const RValueReferenceType *T);
- QualType VisitMemberPointerType(const MemberPointerType *T);
- QualType VisitConstantArrayType(const ConstantArrayType *T);
- QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
- QualType VisitVariableArrayType(const VariableArrayType *T);
- QualType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
+ ExpectedType VisitType(const Type *T);
+ ExpectedType VisitAtomicType(const AtomicType *T);
+ ExpectedType VisitBuiltinType(const BuiltinType *T);
+ ExpectedType VisitDecayedType(const DecayedType *T);
+ ExpectedType VisitComplexType(const ComplexType *T);
+ ExpectedType VisitPointerType(const PointerType *T);
+ ExpectedType VisitBlockPointerType(const BlockPointerType *T);
+ ExpectedType VisitLValueReferenceType(const LValueReferenceType *T);
+ ExpectedType VisitRValueReferenceType(const RValueReferenceType *T);
+ ExpectedType VisitMemberPointerType(const MemberPointerType *T);
+ ExpectedType VisitConstantArrayType(const ConstantArrayType *T);
+ ExpectedType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ ExpectedType VisitVariableArrayType(const VariableArrayType *T);
+ ExpectedType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
// FIXME: DependentSizedExtVectorType
- QualType VisitVectorType(const VectorType *T);
- QualType VisitExtVectorType(const ExtVectorType *T);
- QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
- QualType VisitFunctionProtoType(const FunctionProtoType *T);
- QualType VisitUnresolvedUsingType(const UnresolvedUsingType *T);
- QualType VisitParenType(const ParenType *T);
- QualType VisitTypedefType(const TypedefType *T);
- QualType VisitTypeOfExprType(const TypeOfExprType *T);
+ ExpectedType VisitVectorType(const VectorType *T);
+ ExpectedType VisitExtVectorType(const ExtVectorType *T);
+ ExpectedType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ ExpectedType VisitFunctionProtoType(const FunctionProtoType *T);
+ ExpectedType VisitUnresolvedUsingType(const UnresolvedUsingType *T);
+ ExpectedType VisitParenType(const ParenType *T);
+ ExpectedType VisitTypedefType(const TypedefType *T);
+ ExpectedType VisitTypeOfExprType(const TypeOfExprType *T);
// FIXME: DependentTypeOfExprType
- QualType VisitTypeOfType(const TypeOfType *T);
- QualType VisitDecltypeType(const DecltypeType *T);
- QualType VisitUnaryTransformType(const UnaryTransformType *T);
- QualType VisitAutoType(const AutoType *T);
- QualType VisitInjectedClassNameType(const InjectedClassNameType *T);
+ ExpectedType VisitTypeOfType(const TypeOfType *T);
+ ExpectedType VisitDecltypeType(const DecltypeType *T);
+ ExpectedType VisitUnaryTransformType(const UnaryTransformType *T);
+ ExpectedType VisitAutoType(const AutoType *T);
+ ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T);
// FIXME: DependentDecltypeType
- QualType VisitRecordType(const RecordType *T);
- QualType VisitEnumType(const EnumType *T);
- QualType VisitAttributedType(const AttributedType *T);
- QualType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
- QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T);
- QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
- QualType VisitElaboratedType(const ElaboratedType *T);
- QualType VisitDependentNameType(const DependentNameType *T);
- QualType VisitPackExpansionType(const PackExpansionType *T);
- QualType VisitDependentTemplateSpecializationType(
+ ExpectedType VisitRecordType(const RecordType *T);
+ ExpectedType VisitEnumType(const EnumType *T);
+ ExpectedType VisitAttributedType(const AttributedType *T);
+ ExpectedType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
+ ExpectedType VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T);
+ ExpectedType VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T);
+ ExpectedType VisitElaboratedType(const ElaboratedType *T);
+ ExpectedType VisitDependentNameType(const DependentNameType *T);
+ ExpectedType VisitPackExpansionType(const PackExpansionType *T);
+ ExpectedType VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType *T);
- QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
- QualType VisitObjCObjectType(const ObjCObjectType *T);
- QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+ ExpectedType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ ExpectedType VisitObjCObjectType(const ObjCObjectType *T);
+ ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
// Importing declarations
- bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
- DeclContext *&LexicalDC, DeclarationName &Name,
- NamedDecl *&ToD, SourceLocation &Loc);
- void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
- void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
- DeclarationNameInfo& To);
- void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
- void ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
-
- bool ImportCastPath(CastExpr *E, CXXCastPath &Path);
+ Error ImportDeclParts(
+ NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC,
+ DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc);
+ Error ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
+ Error ImportDeclarationNameLoc(
+ const DeclarationNameInfo &From, DeclarationNameInfo &To);
+ Error ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+ Error ImportDeclContext(
+ Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC);
+ Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
+
+ Expected<CXXCastPath> ImportCastPath(CastExpr *E);
using Designator = DesignatedInitExpr::Designator;
- Designator ImportDesignator(const Designator &D);
-
- Optional<LambdaCapture> ImportLambdaCapture(const LambdaCapture &From);
-
/// What we should import from the definition.
enum ImportDefinitionKind {
/// Import the default subset of the definition, which might be
/// nothing (if minimal import is set) or might be everything (if minimal
/// import is not set).
IDK_Default,
-
/// Import everything.
IDK_Everything,
-
/// Import only the bare bones needed to establish a valid
/// DeclContext.
IDK_Basic
@@ -258,42 +399,44 @@ namespace clang {
(IDK == IDK_Default && !Importer.isMinimalImport());
}
- bool ImportDefinition(RecordDecl *From, RecordDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(VarDecl *From, VarDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(EnumDecl *From, EnumDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
- ImportDefinitionKind Kind = IDK_Default);
- TemplateParameterList *ImportTemplateParameterList(
+ Error ImportInitializer(VarDecl *From, VarDecl *To);
+ Error ImportDefinition(
+ RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Error ImportDefinition(
+ ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ Expected<TemplateParameterList *> ImportTemplateParameterList(
TemplateParameterList *Params);
- TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
- Optional<TemplateArgumentLoc> ImportTemplateArgumentLoc(
- const TemplateArgumentLoc &TALoc);
- bool ImportTemplateArguments(const TemplateArgument *FromArgs,
- unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs);
+ Error ImportTemplateArguments(
+ const TemplateArgument *FromArgs, unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+ Expected<TemplateArgument>
+ ImportTemplateArgument(const TemplateArgument &From);
template <typename InContainerTy>
- bool ImportTemplateArgumentListInfo(const InContainerTy &Container,
- TemplateArgumentListInfo &ToTAInfo);
+ Error ImportTemplateArgumentListInfo(
+ const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo);
template<typename InContainerTy>
- bool ImportTemplateArgumentListInfo(SourceLocation FromLAngleLoc,
- SourceLocation FromRAngleLoc,
- const InContainerTy &Container,
- TemplateArgumentListInfo &Result);
+ Error ImportTemplateArgumentListInfo(
+ SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
+ const InContainerTy &Container, TemplateArgumentListInfo &Result);
using TemplateArgsTy = SmallVector<TemplateArgument, 8>;
- using OptionalTemplateArgsTy = Optional<TemplateArgsTy>;
- std::tuple<FunctionTemplateDecl *, OptionalTemplateArgsTy>
+ using FunctionTemplateAndArgsTy =
+ std::tuple<FunctionTemplateDecl *, TemplateArgsTy>;
+ Expected<FunctionTemplateAndArgsTy>
ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD);
- bool ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
+ Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD);
bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
@@ -307,257 +450,498 @@ namespace clang {
bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To);
bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
- Decl *VisitDecl(Decl *D);
- Decl *VisitEmptyDecl(EmptyDecl *D);
- Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
- Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
- Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
- Decl *VisitNamespaceDecl(NamespaceDecl *D);
- Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
- Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
- Decl *VisitTypedefDecl(TypedefDecl *D);
- Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
- Decl *VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
- Decl *VisitLabelDecl(LabelDecl *D);
- Decl *VisitEnumDecl(EnumDecl *D);
- Decl *VisitRecordDecl(RecordDecl *D);
- Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
- Decl *VisitFunctionDecl(FunctionDecl *D);
- Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
- Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
- Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
- Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
- Decl *VisitFieldDecl(FieldDecl *D);
- Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
- Decl *VisitFriendDecl(FriendDecl *D);
- Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
- Decl *VisitVarDecl(VarDecl *D);
- Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
- Decl *VisitParmVarDecl(ParmVarDecl *D);
- Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
- Decl *VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
- Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
- Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
- Decl *VisitLinkageSpecDecl(LinkageSpecDecl *D);
- Decl *VisitUsingDecl(UsingDecl *D);
- Decl *VisitUsingShadowDecl(UsingShadowDecl *D);
- Decl *VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
- Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
- Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
-
- ObjCTypeParamList *ImportObjCTypeParamList(ObjCTypeParamList *list);
- Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
- Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
- Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
- Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
- Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
- Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
- Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
- Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
- Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
- Decl *VisitClassTemplateSpecializationDecl(
+ ExpectedDecl VisitDecl(Decl *D);
+ ExpectedDecl VisitImportDecl(ImportDecl *D);
+ ExpectedDecl VisitEmptyDecl(EmptyDecl *D);
+ ExpectedDecl VisitAccessSpecDecl(AccessSpecDecl *D);
+ ExpectedDecl VisitStaticAssertDecl(StaticAssertDecl *D);
+ ExpectedDecl VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ ExpectedDecl VisitNamespaceDecl(NamespaceDecl *D);
+ ExpectedDecl VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ ExpectedDecl VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
+ ExpectedDecl VisitTypedefDecl(TypedefDecl *D);
+ ExpectedDecl VisitTypeAliasDecl(TypeAliasDecl *D);
+ ExpectedDecl VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ ExpectedDecl VisitLabelDecl(LabelDecl *D);
+ ExpectedDecl VisitEnumDecl(EnumDecl *D);
+ ExpectedDecl VisitRecordDecl(RecordDecl *D);
+ ExpectedDecl VisitEnumConstantDecl(EnumConstantDecl *D);
+ ExpectedDecl VisitFunctionDecl(FunctionDecl *D);
+ ExpectedDecl VisitCXXMethodDecl(CXXMethodDecl *D);
+ ExpectedDecl VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ ExpectedDecl VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ ExpectedDecl VisitCXXConversionDecl(CXXConversionDecl *D);
+ ExpectedDecl VisitFieldDecl(FieldDecl *D);
+ ExpectedDecl VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ ExpectedDecl VisitFriendDecl(FriendDecl *D);
+ ExpectedDecl VisitObjCIvarDecl(ObjCIvarDecl *D);
+ ExpectedDecl VisitVarDecl(VarDecl *D);
+ ExpectedDecl VisitImplicitParamDecl(ImplicitParamDecl *D);
+ ExpectedDecl VisitParmVarDecl(ParmVarDecl *D);
+ ExpectedDecl VisitObjCMethodDecl(ObjCMethodDecl *D);
+ ExpectedDecl VisitObjCTypeParamDecl(ObjCTypeParamDecl *D);
+ ExpectedDecl VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ ExpectedDecl VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ ExpectedDecl VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ ExpectedDecl VisitUsingDecl(UsingDecl *D);
+ ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D);
+ ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ ExpectedDecl VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+
+ Expected<ObjCTypeParamList *>
+ ImportObjCTypeParamList(ObjCTypeParamList *list);
+
+ ExpectedDecl VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ ExpectedDecl VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ ExpectedDecl VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ ExpectedDecl VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ ExpectedDecl VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ ExpectedDecl VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ ExpectedDecl VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ ExpectedDecl VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ ExpectedDecl VisitClassTemplateDecl(ClassTemplateDecl *D);
+ ExpectedDecl VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D);
- Decl *VisitVarTemplateDecl(VarTemplateDecl *D);
- Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
- Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ ExpectedDecl VisitVarTemplateDecl(VarTemplateDecl *D);
+ ExpectedDecl VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
+ ExpectedDecl VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
// Importing statements
- DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
-
- Stmt *VisitStmt(Stmt *S);
- Stmt *VisitGCCAsmStmt(GCCAsmStmt *S);
- Stmt *VisitDeclStmt(DeclStmt *S);
- Stmt *VisitNullStmt(NullStmt *S);
- Stmt *VisitCompoundStmt(CompoundStmt *S);
- Stmt *VisitCaseStmt(CaseStmt *S);
- Stmt *VisitDefaultStmt(DefaultStmt *S);
- Stmt *VisitLabelStmt(LabelStmt *S);
- Stmt *VisitAttributedStmt(AttributedStmt *S);
- Stmt *VisitIfStmt(IfStmt *S);
- Stmt *VisitSwitchStmt(SwitchStmt *S);
- Stmt *VisitWhileStmt(WhileStmt *S);
- Stmt *VisitDoStmt(DoStmt *S);
- Stmt *VisitForStmt(ForStmt *S);
- Stmt *VisitGotoStmt(GotoStmt *S);
- Stmt *VisitIndirectGotoStmt(IndirectGotoStmt *S);
- Stmt *VisitContinueStmt(ContinueStmt *S);
- Stmt *VisitBreakStmt(BreakStmt *S);
- Stmt *VisitReturnStmt(ReturnStmt *S);
+ ExpectedStmt VisitStmt(Stmt *S);
+ ExpectedStmt VisitGCCAsmStmt(GCCAsmStmt *S);
+ ExpectedStmt VisitDeclStmt(DeclStmt *S);
+ ExpectedStmt VisitNullStmt(NullStmt *S);
+ ExpectedStmt VisitCompoundStmt(CompoundStmt *S);
+ ExpectedStmt VisitCaseStmt(CaseStmt *S);
+ ExpectedStmt VisitDefaultStmt(DefaultStmt *S);
+ ExpectedStmt VisitLabelStmt(LabelStmt *S);
+ ExpectedStmt VisitAttributedStmt(AttributedStmt *S);
+ ExpectedStmt VisitIfStmt(IfStmt *S);
+ ExpectedStmt VisitSwitchStmt(SwitchStmt *S);
+ ExpectedStmt VisitWhileStmt(WhileStmt *S);
+ ExpectedStmt VisitDoStmt(DoStmt *S);
+ ExpectedStmt VisitForStmt(ForStmt *S);
+ ExpectedStmt VisitGotoStmt(GotoStmt *S);
+ ExpectedStmt VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ ExpectedStmt VisitContinueStmt(ContinueStmt *S);
+ ExpectedStmt VisitBreakStmt(BreakStmt *S);
+ ExpectedStmt VisitReturnStmt(ReturnStmt *S);
// FIXME: MSAsmStmt
// FIXME: SEHExceptStmt
// FIXME: SEHFinallyStmt
// FIXME: SEHTryStmt
// FIXME: SEHLeaveStmt
// FIXME: CapturedStmt
- Stmt *VisitCXXCatchStmt(CXXCatchStmt *S);
- Stmt *VisitCXXTryStmt(CXXTryStmt *S);
- Stmt *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ ExpectedStmt VisitCXXCatchStmt(CXXCatchStmt *S);
+ ExpectedStmt VisitCXXTryStmt(CXXTryStmt *S);
+ ExpectedStmt VisitCXXForRangeStmt(CXXForRangeStmt *S);
// FIXME: MSDependentExistsStmt
- Stmt *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
- Stmt *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
- Stmt *VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
- Stmt *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
- Stmt *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
- Stmt *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
- Stmt *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+ ExpectedStmt VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ ExpectedStmt VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ ExpectedStmt VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
+ ExpectedStmt VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ ExpectedStmt VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ ExpectedStmt VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ ExpectedStmt VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
// Importing expressions
- Expr *VisitExpr(Expr *E);
- Expr *VisitVAArgExpr(VAArgExpr *E);
- Expr *VisitGNUNullExpr(GNUNullExpr *E);
- Expr *VisitPredefinedExpr(PredefinedExpr *E);
- Expr *VisitDeclRefExpr(DeclRefExpr *E);
- Expr *VisitImplicitValueInitExpr(ImplicitValueInitExpr *ILE);
- Expr *VisitDesignatedInitExpr(DesignatedInitExpr *E);
- Expr *VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
- Expr *VisitIntegerLiteral(IntegerLiteral *E);
- Expr *VisitFloatingLiteral(FloatingLiteral *E);
- Expr *VisitCharacterLiteral(CharacterLiteral *E);
- Expr *VisitStringLiteral(StringLiteral *E);
- Expr *VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- Expr *VisitAtomicExpr(AtomicExpr *E);
- Expr *VisitAddrLabelExpr(AddrLabelExpr *E);
- Expr *VisitParenExpr(ParenExpr *E);
- Expr *VisitParenListExpr(ParenListExpr *E);
- Expr *VisitStmtExpr(StmtExpr *E);
- Expr *VisitUnaryOperator(UnaryOperator *E);
- Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
- Expr *VisitBinaryOperator(BinaryOperator *E);
- Expr *VisitConditionalOperator(ConditionalOperator *E);
- Expr *VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
- Expr *VisitOpaqueValueExpr(OpaqueValueExpr *E);
- Expr *VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
- Expr *VisitExpressionTraitExpr(ExpressionTraitExpr *E);
- Expr *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
- Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
- Expr *VisitExplicitCastExpr(ExplicitCastExpr *E);
- Expr *VisitOffsetOfExpr(OffsetOfExpr *OE);
- Expr *VisitCXXThrowExpr(CXXThrowExpr *E);
- Expr *VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
- Expr *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
- Expr *VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
- Expr *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
- Expr *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE);
- Expr *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
- Expr *VisitPackExpansionExpr(PackExpansionExpr *E);
- Expr *VisitSizeOfPackExpr(SizeOfPackExpr *E);
- Expr *VisitCXXNewExpr(CXXNewExpr *CE);
- Expr *VisitCXXDeleteExpr(CXXDeleteExpr *E);
- Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
- Expr *VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
- Expr *VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
- Expr *VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
- Expr *VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *CE);
- Expr *VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
- Expr *VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
- Expr *VisitExprWithCleanups(ExprWithCleanups *EWC);
- Expr *VisitCXXThisExpr(CXXThisExpr *E);
- Expr *VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
- Expr *VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
- Expr *VisitMemberExpr(MemberExpr *E);
- Expr *VisitCallExpr(CallExpr *E);
- Expr *VisitLambdaExpr(LambdaExpr *LE);
- Expr *VisitInitListExpr(InitListExpr *E);
- Expr *VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
- Expr *VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E);
- Expr *VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
- Expr *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
- Expr *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
- Expr *VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
- Expr *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
- Expr *VisitTypeTraitExpr(TypeTraitExpr *E);
- Expr *VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ ExpectedStmt VisitExpr(Expr *E);
+ ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
+ ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
+ ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E);
+ ExpectedStmt VisitDeclRefExpr(DeclRefExpr *E);
+ ExpectedStmt VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ ExpectedStmt VisitDesignatedInitExpr(DesignatedInitExpr *E);
+ ExpectedStmt VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
+ ExpectedStmt VisitIntegerLiteral(IntegerLiteral *E);
+ ExpectedStmt VisitFloatingLiteral(FloatingLiteral *E);
+ ExpectedStmt VisitImaginaryLiteral(ImaginaryLiteral *E);
+ ExpectedStmt VisitCharacterLiteral(CharacterLiteral *E);
+ ExpectedStmt VisitStringLiteral(StringLiteral *E);
+ ExpectedStmt VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ ExpectedStmt VisitAtomicExpr(AtomicExpr *E);
+ ExpectedStmt VisitAddrLabelExpr(AddrLabelExpr *E);
+ ExpectedStmt VisitConstantExpr(ConstantExpr *E);
+ ExpectedStmt VisitParenExpr(ParenExpr *E);
+ ExpectedStmt VisitParenListExpr(ParenListExpr *E);
+ ExpectedStmt VisitStmtExpr(StmtExpr *E);
+ ExpectedStmt VisitUnaryOperator(UnaryOperator *E);
+ ExpectedStmt VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ ExpectedStmt VisitBinaryOperator(BinaryOperator *E);
+ ExpectedStmt VisitConditionalOperator(ConditionalOperator *E);
+ ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
+ ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E);
+ ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
+ ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E);
+ ExpectedStmt VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ ExpectedStmt VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ ExpectedStmt VisitImplicitCastExpr(ImplicitCastExpr *E);
+ ExpectedStmt VisitExplicitCastExpr(ExplicitCastExpr *E);
+ ExpectedStmt VisitOffsetOfExpr(OffsetOfExpr *OE);
+ ExpectedStmt VisitCXXThrowExpr(CXXThrowExpr *E);
+ ExpectedStmt VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
+ ExpectedStmt VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+ ExpectedStmt VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ ExpectedStmt VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ ExpectedStmt VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
+ ExpectedStmt VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ ExpectedStmt VisitPackExpansionExpr(PackExpansionExpr *E);
+ ExpectedStmt VisitSizeOfPackExpr(SizeOfPackExpr *E);
+ ExpectedStmt VisitCXXNewExpr(CXXNewExpr *E);
+ ExpectedStmt VisitCXXDeleteExpr(CXXDeleteExpr *E);
+ ExpectedStmt VisitCXXConstructExpr(CXXConstructExpr *E);
+ ExpectedStmt VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
+ ExpectedStmt VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
+ ExpectedStmt VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
+ ExpectedStmt VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
+ ExpectedStmt VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
+ ExpectedStmt VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
+ ExpectedStmt VisitExprWithCleanups(ExprWithCleanups *E);
+ ExpectedStmt VisitCXXThisExpr(CXXThisExpr *E);
+ ExpectedStmt VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
+ ExpectedStmt VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
+ ExpectedStmt VisitMemberExpr(MemberExpr *E);
+ ExpectedStmt VisitCallExpr(CallExpr *E);
+ ExpectedStmt VisitLambdaExpr(LambdaExpr *LE);
+ ExpectedStmt VisitInitListExpr(InitListExpr *E);
+ ExpectedStmt VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
+ ExpectedStmt VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E);
+ ExpectedStmt VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
+ ExpectedStmt VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
+ ExpectedStmt VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
+ ExpectedStmt VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
+ ExpectedStmt VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
+ ExpectedStmt VisitTypeTraitExpr(TypeTraitExpr *E);
+ ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E);
template<typename IIter, typename OIter>
- void ImportArray(IIter Ibegin, IIter Iend, OIter Obegin) {
+ Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
using ItemT = typename std::remove_reference<decltype(*Obegin)>::type;
-
- ASTImporter &ImporterRef = Importer;
- std::transform(Ibegin, Iend, Obegin,
- [&ImporterRef](ItemT From) -> ItemT {
- return ImporterRef.Import(From);
- });
- }
-
- template<typename IIter, typename OIter>
- bool ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
- using ItemT = typename std::remove_reference<decltype(**Obegin)>::type;
-
- ASTImporter &ImporterRef = Importer;
- bool Failed = false;
- std::transform(Ibegin, Iend, Obegin,
- [&ImporterRef, &Failed](ItemT *From) -> ItemT * {
- auto *To = cast_or_null<ItemT>(ImporterRef.Import(From));
- if (!To && From)
- Failed = true;
- return To;
- });
- return Failed;
+ for (; Ibegin != Iend; ++Ibegin, ++Obegin) {
+ Expected<ItemT> ToOrErr = import(*Ibegin);
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ *Obegin = *ToOrErr;
+ }
+ return Error::success();
}
+ // Import every item from a container structure into an output container.
+ // If an error occurs, stops at the first failing item and returns the error.
+ // The output container must already have space for all needed elements (it is
+ // not expanded; new items are written starting at its beginning).
template<typename InContainerTy, typename OutContainerTy>
- bool ImportContainerChecked(const InContainerTy &InContainer,
- OutContainerTy &OutContainer) {
- return ImportArrayChecked(InContainer.begin(), InContainer.end(),
- OutContainer.begin());
+ Error ImportContainerChecked(
+ const InContainerTy &InContainer, OutContainerTy &OutContainer) {
+ return ImportArrayChecked(
+ InContainer.begin(), InContainer.end(), OutContainer.begin());
}
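A minimal sketch of the intended call pattern (FromList is hypothetical; the TemplateParameterList importer below uses exactly this shape):

    // Pre-size the destination to the source's length; elements are overwritten
    // in place rather than appended.
    SmallVector<NamedDecl *, 4> ToParams(FromList->size());
    if (Error Err = ImportContainerChecked(*FromList, ToParams))
      return std::move(Err); // the first failing element aborts the whole import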
template<typename InContainerTy, typename OIter>
- bool ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) {
+ Error ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) {
return ImportArrayChecked(InContainer.begin(), InContainer.end(), Obegin);
}
- // Importing overrides.
void ImportOverrides(CXXMethodDecl *ToMethod, CXXMethodDecl *FromMethod);
- FunctionDecl *FindFunctionTemplateSpecialization(FunctionDecl *FromFD);
+ Expected<FunctionDecl *> FindFunctionTemplateSpecialization(
+ FunctionDecl *FromFD);
};
+// FIXME: Temporary until every import returns Expected.
+template <>
+Expected<TemplateName> ASTNodeImporter::import(const TemplateName &From) {
+ TemplateName To = Importer.Import(From);
+ if (To.isNull() && !From.isNull())
+ return make_error<ImportError>();
+ return To;
+}
+
template <typename InContainerTy>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo(
SourceLocation FromLAngleLoc, SourceLocation FromRAngleLoc,
const InContainerTy &Container, TemplateArgumentListInfo &Result) {
- TemplateArgumentListInfo ToTAInfo(Importer.Import(FromLAngleLoc),
- Importer.Import(FromRAngleLoc));
- if (ImportTemplateArgumentListInfo(Container, ToTAInfo))
- return true;
+ auto ToLAngleLocOrErr = import(FromLAngleLoc);
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(FromRAngleLoc);
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ TemplateArgumentListInfo ToTAInfo(*ToLAngleLocOrErr, *ToRAngleLocOrErr);
+ if (auto Err = ImportTemplateArgumentListInfo(Container, ToTAInfo))
+ return Err;
Result = ToTAInfo;
- return false;
+ return Error::success();
}
template <>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo<TemplateArgumentListInfo>(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo<TemplateArgumentListInfo>(
const TemplateArgumentListInfo &From, TemplateArgumentListInfo &Result) {
return ImportTemplateArgumentListInfo(
From.getLAngleLoc(), From.getRAngleLoc(), From.arguments(), Result);
}
template <>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo<
- ASTTemplateArgumentListInfo>(const ASTTemplateArgumentListInfo &From,
- TemplateArgumentListInfo &Result) {
- return ImportTemplateArgumentListInfo(From.LAngleLoc, From.RAngleLoc,
- From.arguments(), Result);
+Error ASTNodeImporter::ImportTemplateArgumentListInfo<
+ ASTTemplateArgumentListInfo>(
+ const ASTTemplateArgumentListInfo &From,
+ TemplateArgumentListInfo &Result) {
+ return ImportTemplateArgumentListInfo(
+ From.LAngleLoc, From.RAngleLoc, From.arguments(), Result);
}
-std::tuple<FunctionTemplateDecl *, ASTNodeImporter::OptionalTemplateArgsTy>
+Expected<ASTNodeImporter::FunctionTemplateAndArgsTy>
ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
FunctionDecl *FromFD) {
assert(FromFD->getTemplatedKind() ==
- FunctionDecl::TK_FunctionTemplateSpecialization);
+ FunctionDecl::TK_FunctionTemplateSpecialization);
+
+ FunctionTemplateAndArgsTy Result;
+
auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
- auto *Template = cast_or_null<FunctionTemplateDecl>(
- Importer.Import(FTSInfo->getTemplate()));
+ if (Error Err = importInto(std::get<0>(Result), FTSInfo->getTemplate()))
+ return std::move(Err);
// Import template arguments.
auto TemplArgs = FTSInfo->TemplateArguments->asArray();
- TemplateArgsTy ToTemplArgs;
- if (ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
- ToTemplArgs)) // Error during import.
- return std::make_tuple(Template, OptionalTemplateArgsTy());
+ if (Error Err = ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
+ std::get<1>(Result)))
+ return std::move(Err);
- return std::make_tuple(Template, ToTemplArgs);
+ return Result;
+}
+
+template <>
+Expected<TemplateParameterList *>
+ASTNodeImporter::import(TemplateParameterList *From) {
+ SmallVector<NamedDecl *, 4> To(From->size());
+ if (Error Err = ImportContainerChecked(*From, To))
+ return std::move(Err);
+
+ ExpectedExpr ToRequiresClause = import(From->getRequiresClause());
+ if (!ToRequiresClause)
+ return ToRequiresClause.takeError();
+
+ auto ToTemplateLocOrErr = import(From->getTemplateLoc());
+ if (!ToTemplateLocOrErr)
+ return ToTemplateLocOrErr.takeError();
+ auto ToLAngleLocOrErr = import(From->getLAngleLoc());
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(From->getRAngleLoc());
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ return TemplateParameterList::Create(
+ Importer.getToContext(),
+ *ToTemplateLocOrErr,
+ *ToLAngleLocOrErr,
+ To,
+ *ToRAngleLocOrErr,
+ *ToRequiresClause);
+}
+
+template <>
+Expected<TemplateArgument>
+ASTNodeImporter::import(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+
+ case TemplateArgument::Type: {
+ ExpectedType ToTypeOrErr = import(From.getAsType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToTypeOrErr);
+ }
+
+ case TemplateArgument::Integral: {
+ ExpectedType ToTypeOrErr = import(From.getIntegralType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(From, *ToTypeOrErr);
+ }
+
+ case TemplateArgument::Declaration: {
+ Expected<ValueDecl *> ToOrErr = import(From.getAsDecl());
+ if (!ToOrErr)
+ return ToOrErr.takeError();
+ ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToOrErr, *ToTypeOrErr);
+ }
+
+ case TemplateArgument::NullPtr: {
+ ExpectedType ToTypeOrErr = import(From.getNullPtrType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true);
+ }
+
+ case TemplateArgument::Template: {
+ Expected<TemplateName> ToTemplateOrErr = import(From.getAsTemplate());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
+
+ return TemplateArgument(*ToTemplateOrErr);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ Expected<TemplateName> ToTemplateOrErr =
+ import(From.getAsTemplateOrTemplatePattern());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
+
+ return TemplateArgument(
+ *ToTemplateOrErr, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (ExpectedExpr ToExpr = import(From.getAsExpr()))
+ return TemplateArgument(*ToExpr);
+ else
+ return ToExpr.takeError();
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (Error Err = ImportTemplateArguments(
+ From.pack_begin(), From.pack_size(), ToPack))
+ return std::move(Err);
+
+ return TemplateArgument(
+ llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+template <>
+Expected<TemplateArgumentLoc>
+ASTNodeImporter::import(const TemplateArgumentLoc &TALoc) {
+ Expected<TemplateArgument> ArgOrErr = import(TALoc.getArgument());
+ if (!ArgOrErr)
+ return ArgOrErr.takeError();
+ TemplateArgument Arg = *ArgOrErr;
+
+ TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo();
+
+ TemplateArgumentLocInfo ToInfo;
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ ExpectedExpr E = import(FromInfo.getAsExpr());
+ if (!E)
+ return E.takeError();
+ ToInfo = TemplateArgumentLocInfo(*E);
+ } else if (Arg.getKind() == TemplateArgument::Type) {
+ if (auto TSIOrErr = import(FromInfo.getAsTypeSourceInfo()))
+ ToInfo = TemplateArgumentLocInfo(*TSIOrErr);
+ else
+ return TSIOrErr.takeError();
+ } else {
+ auto ToTemplateQualifierLocOrErr =
+ import(FromInfo.getTemplateQualifierLoc());
+ if (!ToTemplateQualifierLocOrErr)
+ return ToTemplateQualifierLocOrErr.takeError();
+ auto ToTemplateNameLocOrErr = import(FromInfo.getTemplateNameLoc());
+ if (!ToTemplateNameLocOrErr)
+ return ToTemplateNameLocOrErr.takeError();
+ auto ToTemplateEllipsisLocOrErr =
+ import(FromInfo.getTemplateEllipsisLoc());
+ if (!ToTemplateEllipsisLocOrErr)
+ return ToTemplateEllipsisLocOrErr.takeError();
+
+ ToInfo = TemplateArgumentLocInfo(
+ *ToTemplateQualifierLocOrErr,
+ *ToTemplateNameLocOrErr,
+ *ToTemplateEllipsisLocOrErr);
+ }
+
+ return TemplateArgumentLoc(Arg, ToInfo);
+}
+
+template <>
+Expected<DeclGroupRef> ASTNodeImporter::import(const DeclGroupRef &DG) {
+ if (DG.isNull())
+ return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
+ size_t NumDecls = DG.end() - DG.begin();
+ SmallVector<Decl *, 1> ToDecls;
+ ToDecls.reserve(NumDecls);
+ for (Decl *FromD : DG) {
+ if (auto ToDOrErr = import(FromD))
+ ToDecls.push_back(*ToDOrErr);
+ else
+ return ToDOrErr.takeError();
+ }
+ return DeclGroupRef::Create(Importer.getToContext(),
+ ToDecls.begin(),
+ NumDecls);
+}
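A sketch of how a statement importer could consume this specialization (S stands for a hypothetical DeclStmt being imported):

    // Import the whole declaration group at once; fail fast on the first error.
    Expected<DeclGroupRef> ToGroupOrErr = import(S->getDeclGroup());
    if (!ToGroupOrErr)
      return ToGroupOrErr.takeError();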
+
+template <>
+Expected<ASTNodeImporter::Designator>
+ASTNodeImporter::import(const Designator &D) {
+ if (D.isFieldDesignator()) {
+ IdentifierInfo *ToFieldName = Importer.Import(D.getFieldName());
+
+ ExpectedSLoc ToDotLocOrErr = import(D.getDotLoc());
+ if (!ToDotLocOrErr)
+ return ToDotLocOrErr.takeError();
+
+ ExpectedSLoc ToFieldLocOrErr = import(D.getFieldLoc());
+ if (!ToFieldLocOrErr)
+ return ToFieldLocOrErr.takeError();
+
+ return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
+ }
+
+ ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc());
+ if (!ToLBracketLocOrErr)
+ return ToLBracketLocOrErr.takeError();
+
+ ExpectedSLoc ToRBracketLocOrErr = import(D.getRBracketLoc());
+ if (!ToRBracketLocOrErr)
+ return ToRBracketLocOrErr.takeError();
+
+ if (D.isArrayDesignator())
+ return Designator(D.getFirstExprIndex(),
+ *ToLBracketLocOrErr, *ToRBracketLocOrErr);
+
+ ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc());
+ if (!ToEllipsisLocOrErr)
+ return ToEllipsisLocOrErr.takeError();
+
+ assert(D.isArrayRangeDesignator());
+ return Designator(
+ D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
+ *ToRBracketLocOrErr);
+}
+
+template <>
+Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) {
+ VarDecl *Var = nullptr;
+ if (From.capturesVariable()) {
+ if (auto VarOrErr = import(From.getCapturedVar()))
+ Var = *VarOrErr;
+ else
+ return VarOrErr.takeError();
+ }
+
+ auto LocationOrErr = import(From.getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+
+ SourceLocation EllipsisLoc;
+ if (From.isPackExpansion())
+ if (Error Err = importInto(EllipsisLoc, From.getEllipsisLoc()))
+ return std::move(Err);
+
+ return LambdaCapture(
+ *LocationOrErr, From.isImplicit(), From.getCaptureKind(), Var,
+ EllipsisLoc);
}
} // namespace clang
@@ -568,26 +952,30 @@ ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
using namespace clang;
-QualType ASTNodeImporter::VisitType(const Type *T) {
+ExpectedType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
- return {};
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-QualType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
- QualType UnderlyingType = Importer.Import(T->getValueType());
- if(UnderlyingType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
+ ExpectedType UnderlyingTypeOrErr = import(T->getValueType());
+ if (!UnderlyingTypeOrErr)
+ return UnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getAtomicType(UnderlyingType);
+ return Importer.getToContext().getAtomicType(*UnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
+ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().Id##Ty;
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -627,183 +1015,196 @@ QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
llvm_unreachable("Invalid BuiltinType Kind!");
}
-QualType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
- QualType OrigT = Importer.Import(T->getOriginalType());
- if (OrigT.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
+ ExpectedType ToOriginalTypeOrErr = import(T->getOriginalType());
+ if (!ToOriginalTypeOrErr)
+ return ToOriginalTypeOrErr.takeError();
- return Importer.getToContext().getDecayedType(OrigT);
+ return Importer.getToContext().getDecayedType(*ToOriginalTypeOrErr);
}
-QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getComplexType(ToElementType);
+ return Importer.getToContext().getComplexType(*ToElementTypeOrErr);
}
-QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitPointerType(const PointerType *T) {
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getPointerType(ToPointeeType);
+ return Importer.getToContext().getPointerType(*ToPointeeTypeOrErr);
}
-QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
+ExpectedType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
// FIXME: Check for blocks support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getBlockPointerType(ToPointeeType);
+ return Importer.getToContext().getBlockPointerType(*ToPointeeTypeOrErr);
}
-QualType
+ExpectedType
ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
// FIXME: Check for C++ support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getLValueReferenceType(ToPointeeType);
+ return Importer.getToContext().getLValueReferenceType(*ToPointeeTypeOrErr);
}
-QualType
+ExpectedType
ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
// FIXME: Check for C++0x support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeTypeAsWritten());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getRValueReferenceType(ToPointeeType);
+ return Importer.getToContext().getRValueReferenceType(*ToPointeeTypeOrErr);
}
-QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
+ExpectedType
+ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
// FIXME: Check for C++ support in "to" context.
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
- return Importer.getToContext().getMemberPointerType(ToPointeeType,
- ClassType.getTypePtr());
+ ExpectedType ClassTypeOrErr = import(QualType(T->getClass(), 0));
+ if (!ClassTypeOrErr)
+ return ClassTypeOrErr.takeError();
+
+ return Importer.getToContext().getMemberPointerType(
+ *ToPointeeTypeOrErr, (*ClassTypeOrErr).getTypePtr());
}
-QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getConstantArrayType(ToElementType,
+ return Importer.getToContext().getConstantArrayType(*ToElementTypeOrErr,
T->getSize(),
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getIncompleteArrayType(ToElementType,
+ return Importer.getToContext().getIncompleteArrayType(*ToElementTypeOrErr,
T->getSizeModifier(),
T->getIndexTypeCVRQualifiers());
}
-QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
-
- Expr *Size = Importer.Import(T->getSizeExpr());
- if (!Size)
- return {};
+ExpectedType
+ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
+ QualType ToElementType;
+ Expr *ToSizeExpr;
+ SourceRange ToBracketsRange;
+ if (auto Imp = importSeq(
+ T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
+ std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
+ else
+ return Imp.takeError();
- SourceRange Brackets = Importer.Import(T->getBracketsRange());
- return Importer.getToContext().getVariableArrayType(ToElementType, Size,
- T->getSizeModifier(),
- T->getIndexTypeCVRQualifiers(),
- Brackets);
+ return Importer.getToContext().getVariableArrayType(
+ ToElementType, ToSizeExpr, T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(), ToBracketsRange);
}
-QualType ASTNodeImporter::VisitDependentSizedArrayType(
+ExpectedType ASTNodeImporter::VisitDependentSizedArrayType(
const DependentSizedArrayType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
-
+ QualType ToElementType;
+ Expr *ToSizeExpr;
+ SourceRange ToBracketsRange;
+ if (auto Imp = importSeq(
+ T->getElementType(), T->getSizeExpr(), T->getBracketsRange()))
+ std::tie(ToElementType, ToSizeExpr, ToBracketsRange) = *Imp;
+ else
+ return Imp.takeError();
// SizeExpr may be null if size is not specified directly.
// For example, 'int a[]'.
- Expr *Size = Importer.Import(T->getSizeExpr());
- if (!Size && T->getSizeExpr())
- return {};
- SourceRange Brackets = Importer.Import(T->getBracketsRange());
return Importer.getToContext().getDependentSizedArrayType(
- ToElementType, Size, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(),
- Brackets);
+ ToElementType, ToSizeExpr, T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(), ToBracketsRange);
}
-QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitVectorType(const VectorType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getVectorType(ToElementType,
+ return Importer.getToContext().getVectorType(*ToElementTypeOrErr,
T->getNumElements(),
T->getVectorKind());
}
-QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
- QualType ToElementType = Importer.Import(T->getElementType());
- if (ToElementType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
- return Importer.getToContext().getExtVectorType(ToElementType,
+ return Importer.getToContext().getExtVectorType(*ToElementTypeOrErr,
T->getNumElements());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
// FIXME: What happens if we're importing a function without a prototype
// into C++? Should we make it variadic?
- QualType ToResultType = Importer.Import(T->getReturnType());
- if (ToResultType.isNull())
- return {};
+ ExpectedType ToReturnTypeOrErr = import(T->getReturnType());
+ if (!ToReturnTypeOrErr)
+ return ToReturnTypeOrErr.takeError();
- return Importer.getToContext().getFunctionNoProtoType(ToResultType,
+ return Importer.getToContext().getFunctionNoProtoType(*ToReturnTypeOrErr,
T->getExtInfo());
}
-QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
- QualType ToResultType = Importer.Import(T->getReturnType());
- if (ToResultType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ ExpectedType ToReturnTypeOrErr = import(T->getReturnType());
+ if (!ToReturnTypeOrErr)
+ return ToReturnTypeOrErr.takeError();
// Import argument types
SmallVector<QualType, 4> ArgTypes;
for (const auto &A : T->param_types()) {
- QualType ArgType = Importer.Import(A);
- if (ArgType.isNull())
- return {};
- ArgTypes.push_back(ArgType);
+ ExpectedType TyOrErr = import(A);
+ if (!TyOrErr)
+ return TyOrErr.takeError();
+ ArgTypes.push_back(*TyOrErr);
}
// Import exception types
SmallVector<QualType, 4> ExceptionTypes;
for (const auto &E : T->exceptions()) {
- QualType ExceptionType = Importer.Import(E);
- if (ExceptionType.isNull())
- return {};
- ExceptionTypes.push_back(ExceptionType);
+ ExpectedType TyOrErr = import(E);
+ if (!TyOrErr)
+ return TyOrErr.takeError();
+ ExceptionTypes.push_back(*TyOrErr);
}
FunctionProtoType::ExtProtoInfo FromEPI = T->getExtProtoInfo();
FunctionProtoType::ExtProtoInfo ToEPI;
+ auto Imp = importSeq(
+ FromEPI.ExceptionSpec.NoexceptExpr,
+ FromEPI.ExceptionSpec.SourceDecl,
+ FromEPI.ExceptionSpec.SourceTemplate);
+ if (!Imp)
+ return Imp.takeError();
+
ToEPI.ExtInfo = FromEPI.ExtInfo;
ToEPI.Variadic = FromEPI.Variadic;
ToEPI.HasTrailingReturn = FromEPI.HasTrailingReturn;
@@ -811,112 +1212,108 @@ QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
ToEPI.RefQualifier = FromEPI.RefQualifier;
ToEPI.ExceptionSpec.Type = FromEPI.ExceptionSpec.Type;
ToEPI.ExceptionSpec.Exceptions = ExceptionTypes;
- ToEPI.ExceptionSpec.NoexceptExpr =
- Importer.Import(FromEPI.ExceptionSpec.NoexceptExpr);
- ToEPI.ExceptionSpec.SourceDecl = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpec.SourceDecl));
- ToEPI.ExceptionSpec.SourceTemplate = cast_or_null<FunctionDecl>(
- Importer.Import(FromEPI.ExceptionSpec.SourceTemplate));
+ std::tie(
+ ToEPI.ExceptionSpec.NoexceptExpr,
+ ToEPI.ExceptionSpec.SourceDecl,
+ ToEPI.ExceptionSpec.SourceTemplate) = *Imp;
- return Importer.getToContext().getFunctionType(ToResultType, ArgTypes, ToEPI);
+ return Importer.getToContext().getFunctionType(
+ *ToReturnTypeOrErr, ArgTypes, ToEPI);
}
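// [Editor's note: illustrative sketch, not part of this patch] The
// importSeq()/std::tie pairing used above bundles several imports into a
// single Expected<std::tuple<...>> so one error check covers them all. A
// standalone analogue of that shape, assuming each element comes from an
// Expected-returning call (hypothetical names, not the Clang helper itself):
//
//   #include "llvm/Support/Error.h"
//   #include <tuple>
//   using namespace llvm;
//
//   Expected<int> importOne(int V) {      // stand-in for import(X)
//     if (V < 0)
//       return createStringError(inconvertibleErrorCode(), "negative value");
//     return V;
//   }
//
//   Expected<std::tuple<int, int>> importPair(int A, int B) {
//     Expected<int> ToA = importOne(A);
//     if (!ToA)
//       return ToA.takeError();
//     Expected<int> ToB = importOne(B);
//     if (!ToB)
//       return ToB.takeError();
//     return std::make_tuple(*ToA, *ToB); // caller unpacks with std::tie()
//   }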
-QualType ASTNodeImporter::VisitUnresolvedUsingType(
+ExpectedType ASTNodeImporter::VisitUnresolvedUsingType(
const UnresolvedUsingType *T) {
- const auto *ToD =
- cast_or_null<UnresolvedUsingTypenameDecl>(Importer.Import(T->getDecl()));
- if (!ToD)
- return {};
-
- auto *ToPrevD =
- cast_or_null<UnresolvedUsingTypenameDecl>(
- Importer.Import(T->getDecl()->getPreviousDecl()));
- if (!ToPrevD && T->getDecl()->getPreviousDecl())
- return {};
+ UnresolvedUsingTypenameDecl *ToD;
+ Decl *ToPrevD;
+ if (auto Imp = importSeq(T->getDecl(), T->getDecl()->getPreviousDecl()))
+ std::tie(ToD, ToPrevD) = *Imp;
+ else
+ return Imp.takeError();
- return Importer.getToContext().getTypeDeclType(ToD, ToPrevD);
+ return Importer.getToContext().getTypeDeclType(
+ ToD, cast_or_null<TypeDecl>(ToPrevD));
}
-QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
- QualType ToInnerType = Importer.Import(T->getInnerType());
- if (ToInnerType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) {
+ ExpectedType ToInnerTypeOrErr = import(T->getInnerType());
+ if (!ToInnerTypeOrErr)
+ return ToInnerTypeOrErr.takeError();
- return Importer.getToContext().getParenType(ToInnerType);
+ return Importer.getToContext().getParenType(*ToInnerTypeOrErr);
}
-QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
- auto *ToDecl =
- dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
+ Expected<TypedefNameDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTypeDeclType(ToDecl);
+ return Importer.getToContext().getTypeDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
- Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
- if (!ToExpr)
- return {};
+ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
+ if (!ToExprOrErr)
+ return ToExprOrErr.takeError();
- return Importer.getToContext().getTypeOfExprType(ToExpr);
+ return Importer.getToContext().getTypeOfExprType(*ToExprOrErr);
}
-QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
- QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
- if (ToUnderlyingType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getTypeOfType(ToUnderlyingType);
+ return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
// FIXME: Make sure that the "to" context supports C++0x!
- Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
- if (!ToExpr)
- return {};
+ ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
+ if (!ToExprOrErr)
+ return ToExprOrErr.takeError();
- QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
- if (UnderlyingType.isNull())
- return {};
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
+ return Importer.getToContext().getDecltypeType(
+ *ToExprOrErr, *ToUnderlyingTypeOrErr);
}
-QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
- QualType ToBaseType = Importer.Import(T->getBaseType());
- QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
- if (ToBaseType.isNull() || ToUnderlyingType.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ ExpectedType ToBaseTypeOrErr = import(T->getBaseType());
+ if (!ToBaseTypeOrErr)
+ return ToBaseTypeOrErr.takeError();
+
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
- return Importer.getToContext().getUnaryTransformType(ToBaseType,
- ToUnderlyingType,
- T->getUTTKind());
+ return Importer.getToContext().getUnaryTransformType(
+ *ToBaseTypeOrErr, *ToUnderlyingTypeOrErr, T->getUTTKind());
}
-QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) {
// FIXME: Make sure that the "to" context supports C++11!
- QualType FromDeduced = T->getDeducedType();
- QualType ToDeduced;
- if (!FromDeduced.isNull()) {
- ToDeduced = Importer.Import(FromDeduced);
- if (ToDeduced.isNull())
- return {};
- }
+ ExpectedType ToDeducedTypeOrErr = import(T->getDeducedType());
+ if (!ToDeducedTypeOrErr)
+ return ToDeducedTypeOrErr.takeError();
- return Importer.getToContext().getAutoType(ToDeduced, T->getKeyword(),
+ return Importer.getToContext().getAutoType(*ToDeducedTypeOrErr,
+ T->getKeyword(),
/*IsDependent*/false);
}
-QualType ASTNodeImporter::VisitInjectedClassNameType(
+ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
- auto *D = cast_or_null<CXXRecordDecl>(Importer.Import(T->getDecl()));
- if (!D)
- return {};
+ Expected<CXXRecordDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- QualType InjType = Importer.Import(T->getInjectedSpecializationType());
- if (InjType.isNull())
- return {};
+ ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType());
+ if (!ToInjTypeOrErr)
+ return ToInjTypeOrErr.takeError();
// FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading
// See comments in InjectedClassNameType definition for details
@@ -927,293 +1324,287 @@ QualType ASTNodeImporter::VisitInjectedClassNameType(
};
return QualType(new (Importer.getToContext(), TypeAlignment)
- InjectedClassNameType(D, InjType), 0);
+ InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0);
}
-QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
- auto *ToDecl = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ Expected<RecordDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTagDeclType(ToDecl);
+ return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
- auto *ToDecl = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
- if (!ToDecl)
- return {};
+ExpectedType ASTNodeImporter::VisitEnumType(const EnumType *T) {
+ Expected<EnumDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getTagDeclType(ToDecl);
+ return Importer.getToContext().getTagDeclType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
- QualType FromModifiedType = T->getModifiedType();
- QualType FromEquivalentType = T->getEquivalentType();
- QualType ToModifiedType;
- QualType ToEquivalentType;
-
- if (!FromModifiedType.isNull()) {
- ToModifiedType = Importer.Import(FromModifiedType);
- if (ToModifiedType.isNull())
- return {};
- }
- if (!FromEquivalentType.isNull()) {
- ToEquivalentType = Importer.Import(FromEquivalentType);
- if (ToEquivalentType.isNull())
- return {};
- }
+ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
+ ExpectedType ToModifiedTypeOrErr = import(T->getModifiedType());
+ if (!ToModifiedTypeOrErr)
+ return ToModifiedTypeOrErr.takeError();
+ ExpectedType ToEquivalentTypeOrErr = import(T->getEquivalentType());
+ if (!ToEquivalentTypeOrErr)
+ return ToEquivalentTypeOrErr.takeError();
return Importer.getToContext().getAttributedType(T->getAttrKind(),
- ToModifiedType, ToEquivalentType);
+ *ToModifiedTypeOrErr, *ToEquivalentTypeOrErr);
}
-QualType ASTNodeImporter::VisitTemplateTypeParmType(
+ExpectedType ASTNodeImporter::VisitTemplateTypeParmType(
const TemplateTypeParmType *T) {
- auto *ParmDecl =
- cast_or_null<TemplateTypeParmDecl>(Importer.Import(T->getDecl()));
- if (!ParmDecl && T->getDecl())
- return {};
+ Expected<TemplateTypeParmDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
return Importer.getToContext().getTemplateTypeParmType(
- T->getDepth(), T->getIndex(), T->isParameterPack(), ParmDecl);
+ T->getDepth(), T->getIndex(), T->isParameterPack(), *ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitSubstTemplateTypeParmType(
+ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmType(
const SubstTemplateTypeParmType *T) {
- const auto *Replaced =
- cast_or_null<TemplateTypeParmType>(Importer.Import(
- QualType(T->getReplacedParameter(), 0)).getTypePtr());
- if (!Replaced)
- return {};
+ ExpectedType ReplacedOrErr = import(QualType(T->getReplacedParameter(), 0));
+ if (!ReplacedOrErr)
+ return ReplacedOrErr.takeError();
+ const TemplateTypeParmType *Replaced =
+ cast<TemplateTypeParmType>((*ReplacedOrErr).getTypePtr());
- QualType Replacement = Importer.Import(T->getReplacementType());
- if (Replacement.isNull())
- return {};
- Replacement = Replacement.getCanonicalType();
+ ExpectedType ToReplacementTypeOrErr = import(T->getReplacementType());
+ if (!ToReplacementTypeOrErr)
+ return ToReplacementTypeOrErr.takeError();
return Importer.getToContext().getSubstTemplateTypeParmType(
- Replaced, Replacement);
+ Replaced, (*ToReplacementTypeOrErr).getCanonicalType());
}
-QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
- TemplateName ToTemplate = Importer.Import(T->getTemplateName());
- if (ToTemplate.isNull())
- return {};
+ auto ToTemplateOrErr = import(T->getTemplateName());
+ if (!ToTemplateOrErr)
+ return ToTemplateOrErr.takeError();
SmallVector<TemplateArgument, 2> ToTemplateArgs;
- if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
- return {};
+ if (Error Err = ImportTemplateArguments(
+ T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return std::move(Err);
QualType ToCanonType;
if (!QualType(T, 0).isCanonical()) {
QualType FromCanonType
= Importer.getFromContext().getCanonicalType(QualType(T, 0));
- ToCanonType =Importer.Import(FromCanonType);
- if (ToCanonType.isNull())
- return {};
+ if (ExpectedType TyOrErr = import(FromCanonType))
+ ToCanonType = *TyOrErr;
+ else
+ return TyOrErr.takeError();
}
- return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ return Importer.getToContext().getTemplateSpecializationType(*ToTemplateOrErr,
ToTemplateArgs,
ToCanonType);
}
-QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
- NestedNameSpecifier *ToQualifier = nullptr;
+ExpectedType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
// Note: the qualifier in an ElaboratedType is optional.
- if (T->getQualifier()) {
- ToQualifier = Importer.Import(T->getQualifier());
- if (!ToQualifier)
- return {};
- }
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- QualType ToNamedType = Importer.Import(T->getNamedType());
- if (ToNamedType.isNull())
- return {};
+ ExpectedType ToNamedTypeOrErr = import(T->getNamedType());
+ if (!ToNamedTypeOrErr)
+ return ToNamedTypeOrErr.takeError();
- TagDecl *OwnedTagDecl =
- cast_or_null<TagDecl>(Importer.Import(T->getOwnedTagDecl()));
- if (!OwnedTagDecl && T->getOwnedTagDecl())
- return {};
+ Expected<TagDecl *> ToOwnedTagDeclOrErr = import(T->getOwnedTagDecl());
+ if (!ToOwnedTagDeclOrErr)
+ return ToOwnedTagDeclOrErr.takeError();
return Importer.getToContext().getElaboratedType(T->getKeyword(),
- ToQualifier, ToNamedType,
- OwnedTagDecl);
+ *ToQualifierOrErr,
+ *ToNamedTypeOrErr,
+ *ToOwnedTagDeclOrErr);
}
-QualType ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
- QualType Pattern = Importer.Import(T->getPattern());
- if (Pattern.isNull())
- return {};
+ExpectedType
+ASTNodeImporter::VisitPackExpansionType(const PackExpansionType *T) {
+ ExpectedType ToPatternOrErr = import(T->getPattern());
+ if (!ToPatternOrErr)
+ return ToPatternOrErr.takeError();
- return Importer.getToContext().getPackExpansionType(Pattern,
+ return Importer.getToContext().getPackExpansionType(*ToPatternOrErr,
T->getNumExpansions());
}
-QualType ASTNodeImporter::VisitDependentTemplateSpecializationType(
+ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType *T) {
- NestedNameSpecifier *Qualifier = Importer.Import(T->getQualifier());
- if (!Qualifier && T->getQualifier())
- return {};
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
- IdentifierInfo *Name = Importer.Import(T->getIdentifier());
- if (!Name && T->getIdentifier())
- return {};
+ IdentifierInfo *ToName = Importer.Import(T->getIdentifier());
SmallVector<TemplateArgument, 2> ToPack;
ToPack.reserve(T->getNumArgs());
- if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToPack))
- return {};
+ if (Error Err = ImportTemplateArguments(
+ T->getArgs(), T->getNumArgs(), ToPack))
+ return std::move(Err);
return Importer.getToContext().getDependentTemplateSpecializationType(
- T->getKeyword(), Qualifier, Name, ToPack);
+ T->getKeyword(), *ToQualifierOrErr, ToName, ToPack);
}
-QualType ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) {
- NestedNameSpecifier *NNS = Importer.Import(T->getQualifier());
- if (!NNS && T->getQualifier())
- return QualType();
+ExpectedType
+ASTNodeImporter::VisitDependentNameType(const DependentNameType *T) {
+ auto ToQualifierOrErr = import(T->getQualifier());
+ if (!ToQualifierOrErr)
+ return ToQualifierOrErr.takeError();
IdentifierInfo *Name = Importer.Import(T->getIdentifier());
- if (!Name && T->getIdentifier())
- return QualType();
- QualType Canon = (T == T->getCanonicalTypeInternal().getTypePtr())
- ? QualType()
- : Importer.Import(T->getCanonicalTypeInternal());
- if (!Canon.isNull())
- Canon = Canon.getCanonicalType();
+ QualType Canon;
+ if (T != T->getCanonicalTypeInternal().getTypePtr()) {
+ if (ExpectedType TyOrErr = import(T->getCanonicalTypeInternal()))
+ Canon = (*TyOrErr).getCanonicalType();
+ else
+ return TyOrErr.takeError();
+ }
- return Importer.getToContext().getDependentNameType(T->getKeyword(), NNS,
+ return Importer.getToContext().getDependentNameType(T->getKeyword(),
+ *ToQualifierOrErr,
Name, Canon);
}
-QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
- auto *Class =
- dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
- if (!Class)
- return {};
+ExpectedType
+ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ Expected<ObjCInterfaceDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
- return Importer.getToContext().getObjCInterfaceType(Class);
+ return Importer.getToContext().getObjCInterfaceType(*ToDeclOrErr);
}
-QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
- QualType ToBaseType = Importer.Import(T->getBaseType());
- if (ToBaseType.isNull())
- return {};
+ExpectedType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
+ ExpectedType ToBaseTypeOrErr = import(T->getBaseType());
+ if (!ToBaseTypeOrErr)
+ return ToBaseTypeOrErr.takeError();
SmallVector<QualType, 4> TypeArgs;
for (auto TypeArg : T->getTypeArgsAsWritten()) {
- QualType ImportedTypeArg = Importer.Import(TypeArg);
- if (ImportedTypeArg.isNull())
- return {};
-
- TypeArgs.push_back(ImportedTypeArg);
+ if (ExpectedType TyOrErr = import(TypeArg))
+ TypeArgs.push_back(*TyOrErr);
+ else
+ return TyOrErr.takeError();
}
SmallVector<ObjCProtocolDecl *, 4> Protocols;
for (auto *P : T->quals()) {
- auto *Protocol = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(P));
- if (!Protocol)
- return {};
- Protocols.push_back(Protocol);
+ if (Expected<ObjCProtocolDecl *> ProtocolOrErr = import(P))
+ Protocols.push_back(*ProtocolOrErr);
+ else
+ return ProtocolOrErr.takeError();
+
}
- return Importer.getToContext().getObjCObjectType(ToBaseType, TypeArgs,
+ return Importer.getToContext().getObjCObjectType(*ToBaseTypeOrErr, TypeArgs,
Protocols,
T->isKindOfTypeAsWritten());
}
-QualType
+ExpectedType
ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
- QualType ToPointeeType = Importer.Import(T->getPointeeType());
- if (ToPointeeType.isNull())
- return {};
+ ExpectedType ToPointeeTypeOrErr = import(T->getPointeeType());
+ if (!ToPointeeTypeOrErr)
+ return ToPointeeTypeOrErr.takeError();
- return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
+ return Importer.getToContext().getObjCObjectPointerType(*ToPointeeTypeOrErr);
}
//----------------------------------------------------------------------------
// Import Declarations
//----------------------------------------------------------------------------
-bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
- DeclContext *&LexicalDC,
- DeclarationName &Name,
- NamedDecl *&ToD,
- SourceLocation &Loc) {
+Error ASTNodeImporter::ImportDeclParts(
+ NamedDecl *D, DeclContext *&DC, DeclContext *&LexicalDC,
+ DeclarationName &Name, NamedDecl *&ToD, SourceLocation &Loc) {
// Check if RecordDecl is in FunctionDecl parameters to avoid infinite loop.
// example: int struct_in_proto(struct data_t{int a;int b;} *d);
DeclContext *OrigDC = D->getDeclContext();
FunctionDecl *FunDecl;
if (isa<RecordDecl>(D) && (FunDecl = dyn_cast<FunctionDecl>(OrigDC)) &&
FunDecl->hasBody()) {
- SourceRange RecR = D->getSourceRange();
- SourceRange BodyR = FunDecl->getBody()->getSourceRange();
- // If RecordDecl is not in Body (it is a param), we bail out.
- if (RecR.isValid() && BodyR.isValid() &&
- (RecR.getBegin() < BodyR.getBegin() ||
- BodyR.getEnd() < RecR.getEnd())) {
- Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
- << D->getDeclKindName();
- return true;
+ auto getLeafPointeeType = [](const Type *T) {
+ while (T->isPointerType() || T->isArrayType()) {
+ T = T->getPointeeOrArrayElementType();
+ }
+ return T;
+ };
+ for (const ParmVarDecl *P : FunDecl->parameters()) {
+ const Type *LeafT =
+ getLeafPointeeType(P->getType().getCanonicalType().getTypePtr());
+ auto *RT = dyn_cast<RecordType>(LeafT);
+ if (RT && RT->getDecl() == D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ }
}
}
// Import the context of this declaration.
- DC = Importer.ImportContext(OrigDC);
- if (!DC)
- return true;
-
- LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return true;
- }
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return Err;
// Import the name of this declaration.
- Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return true;
+ if (Error Err = importInto(Name, D->getDeclName()))
+ return Err;
// Import the location of this declaration.
- Loc = Importer.Import(D->getLocation());
+ if (Error Err = importInto(Loc, D->getLocation()))
+ return Err;
+
ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
- return false;
+ if (ToD)
+ if (Error Err = ASTNodeImporter(*this).ImportDefinitionIfNeeded(D, ToD))
+ return Err;
+
+ return Error::success();
}
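// [Editor's note: illustrative sketch, not part of this patch] importInto()
// used above assigns through an out-parameter, so callers keep the old "fill
// these references" shape while still surfacing an llvm::Error. Assumed shape
// of such a helper (import() is stubbed out here; this is not copied from the
// patch):
//
//   #include "llvm/Support/Error.h"
//   using namespace llvm;
//
//   Expected<int> import(int From) { return From; } // hypothetical stub
//
//   template <typename T>
//   Error importInto(T &To, const T &From) {
//     Expected<T> ToOrErr = import(From);
//     if (!ToOrErr)
//       return ToOrErr.takeError();
//     To = *ToOrErr;
//     return Error::success();
//   }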
-void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
+Error ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
if (!FromD)
- return;
+ return Error::success();
- if (!ToD) {
- ToD = Importer.Import(FromD);
- if (!ToD)
- return;
- }
-
- if (auto *FromRecord = dyn_cast<RecordDecl>(FromD)) {
- if (auto *ToRecord = cast_or_null<RecordDecl>(ToD)) {
- if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() && !ToRecord->getDefinition()) {
- ImportDefinition(FromRecord, ToRecord);
+ if (!ToD)
+ if (Error Err = importInto(ToD, FromD))
+ return Err;
+
+ if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (RecordDecl *ToRecord = cast<RecordDecl>(ToD)) {
+ if (FromRecord->getDefinition() && FromRecord->isCompleteDefinition() &&
+ !ToRecord->getDefinition()) {
+ if (Error Err = ImportDefinition(FromRecord, ToRecord))
+ return Err;
}
}
- return;
+ return Error::success();
}
- if (auto *FromEnum = dyn_cast<EnumDecl>(FromD)) {
- if (auto *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (EnumDecl *ToEnum = cast<EnumDecl>(ToD)) {
if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
- ImportDefinition(FromEnum, ToEnum);
+ if (Error Err = ImportDefinition(FromEnum, ToEnum))
+ return Err;
}
}
- return;
+ return Error::success();
}
+
+ return Error::success();
}
-void
-ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
- DeclarationNameInfo& To) {
+Error
+ASTNodeImporter::ImportDeclarationNameLoc(
+ const DeclarationNameInfo &From, DeclarationNameInfo& To) {
// NOTE: To.Name and To.Loc are already imported.
// We only have to import To.LocInfo.
switch (To.getName().getNameKind()) {
@@ -1223,76 +1614,121 @@ ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXUsingDirective:
case DeclarationName::CXXDeductionGuideName:
- return;
+ return Error::success();
case DeclarationName::CXXOperatorName: {
- SourceRange Range = From.getCXXOperatorNameRange();
- To.setCXXOperatorNameRange(Importer.Import(Range));
- return;
+ if (auto ToRangeOrErr = import(From.getCXXOperatorNameRange()))
+ To.setCXXOperatorNameRange(*ToRangeOrErr);
+ else
+ return ToRangeOrErr.takeError();
+ return Error::success();
}
case DeclarationName::CXXLiteralOperatorName: {
- SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
- To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
- return;
+ if (ExpectedSLoc LocOrErr = import(From.getCXXLiteralOperatorNameLoc()))
+ To.setCXXLiteralOperatorNameLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+ return Error::success();
}
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName: {
- TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
- To.setNamedTypeInfo(Importer.Import(FromTInfo));
- return;
+ if (auto ToTInfoOrErr = import(From.getNamedTypeInfo()))
+ To.setNamedTypeInfo(*ToTInfoOrErr);
+ else
+ return ToTInfoOrErr.takeError();
+ return Error::success();
}
}
llvm_unreachable("Unknown name kind.");
}
-void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+Error
+ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
if (Importer.isMinimalImport() && !ForceImport) {
- Importer.ImportContext(FromDC);
- return;
+ auto ToDCOrErr = Importer.ImportContext(FromDC);
+ return ToDCOrErr.takeError();
+ }
+ llvm::SmallVector<Decl *, 8> ImportedDecls;
+ for (auto *From : FromDC->decls()) {
+ ExpectedDecl ImportedOrErr = import(From);
+ if (!ImportedOrErr)
+ // Ignore the error, continue with next Decl.
+ // FIXME: Handle this case somehow better.
+ consumeError(ImportedOrErr.takeError());
}
- for (auto *From : FromDC->decls())
- Importer.Import(From);
+ return Error::success();
+}
+
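// [Editor's note: illustrative sketch, not part of this patch] The loop above
// deliberately drops failed child imports with consumeError(); an llvm::Error
// must be consumed or handled on every path, otherwise its destructor aborts
// in builds with LLVM_ENABLE_ABI_BREAKING_CHECKS. A minimal best-effort loop
// in the same style (hypothetical names):
//
//   #include "llvm/ADT/ArrayRef.h"
//   #include "llvm/Support/Error.h"
//   using namespace llvm;
//
//   Expected<int> importOne(int V) {
//     if (V < 0)
//       return createStringError(inconvertibleErrorCode(), "negative value");
//     return V;
//   }
//
//   int importAll(ArrayRef<int> Vals) {
//     int Imported = 0;
//     for (int V : Vals) {
//       Expected<int> R = importOne(V);
//       if (!R) {
//         consumeError(R.takeError()); // ignore, continue with the next value
//         continue;
//       }
//       ++Imported;
//     }
//     return Imported;
//   }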
+Error ASTNodeImporter::ImportDeclContext(
+ Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) {
+ auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext());
+ if (!ToDCOrErr)
+ return ToDCOrErr.takeError();
+ ToDC = *ToDCOrErr;
+
+ if (FromD->getDeclContext() != FromD->getLexicalDeclContext()) {
+ auto ToLexicalDCOrErr = Importer.ImportContext(
+ FromD->getLexicalDeclContext());
+ if (!ToLexicalDCOrErr)
+ return ToLexicalDCOrErr.takeError();
+ ToLexicalDC = *ToLexicalDCOrErr;
+ } else
+ ToLexicalDC = ToDC;
+
+ return Error::success();
}
-void ASTNodeImporter::ImportImplicitMethods(
+Error ASTNodeImporter::ImportImplicitMethods(
const CXXRecordDecl *From, CXXRecordDecl *To) {
assert(From->isCompleteDefinition() && To->getDefinition() == To &&
"Import implicit methods to or from non-definition");
for (CXXMethodDecl *FromM : From->methods())
- if (FromM->isImplicit())
- Importer.Import(FromM);
+ if (FromM->isImplicit()) {
+ Expected<CXXMethodDecl *> ToMOrErr = import(FromM);
+ if (!ToMOrErr)
+ return ToMOrErr.takeError();
+ }
+
+ return Error::success();
}
-static void setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
- ASTImporter &Importer) {
+static Error setTypedefNameForAnonDecl(TagDecl *From, TagDecl *To,
+ ASTImporter &Importer) {
if (TypedefNameDecl *FromTypedef = From->getTypedefNameForAnonDecl()) {
- auto *ToTypedef =
- cast_or_null<TypedefNameDecl>(Importer.Import(FromTypedef));
- assert (ToTypedef && "Failed to import typedef of an anonymous structure");
-
- To->setTypedefNameForAnonDecl(ToTypedef);
+ Decl *ToTypedef = Importer.Import(FromTypedef);
+ if (!ToTypedef)
+ return make_error<ImportError>();
+ To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToTypedef));
+ // FIXME: This should be the final code.
+ //if (Expected<Decl *> ToTypedefOrErr = Importer.Import(FromTypedef))
+ // To->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(*ToTypedefOrErr));
+ //else
+ // return ToTypedefOrErr.takeError();
}
+ return Error::success();
}
-bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ RecordDecl *From, RecordDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
- ImportDeclContext(From, /*ForceImport=*/true);
+ return ImportDeclContext(From, /*ForceImport=*/true);
- return false;
+ return Error::success();
}
To->startDefinition();
- setTypedefNameForAnonDecl(From, To, Importer);
+ if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
+ return Err;
// Add base classes.
- if (auto *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
- auto *FromCXX = cast<CXXRecordDecl>(From);
+ auto *ToCXX = dyn_cast<CXXRecordDecl>(To);
+ auto *FromCXX = dyn_cast<CXXRecordDecl>(From);
+ if (ToCXX && FromCXX && ToCXX->dataPtr() && FromCXX->dataPtr()) {
struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
@@ -1364,228 +1800,172 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
SmallVector<CXXBaseSpecifier *, 4> Bases;
for (const auto &Base1 : FromCXX->bases()) {
- QualType T = Importer.Import(Base1.getType());
- if (T.isNull())
- return true;
+ ExpectedType TyOrErr = import(Base1.getType());
+ if (!TyOrErr)
+ return TyOrErr.takeError();
SourceLocation EllipsisLoc;
- if (Base1.isPackExpansion())
- EllipsisLoc = Importer.Import(Base1.getEllipsisLoc());
+ if (Base1.isPackExpansion()) {
+ if (ExpectedSLoc LocOrErr = import(Base1.getEllipsisLoc()))
+ EllipsisLoc = *LocOrErr;
+ else
+ return LocOrErr.takeError();
+ }
// Ensure that we have a definition for the base.
- ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl());
+ if (Error Err =
+ ImportDefinitionIfNeeded(Base1.getType()->getAsCXXRecordDecl()))
+ return Err;
+
+ auto RangeOrErr = import(Base1.getSourceRange());
+ if (!RangeOrErr)
+ return RangeOrErr.takeError();
+
+ auto TSIOrErr = import(Base1.getTypeSourceInfo());
+ if (!TSIOrErr)
+ return TSIOrErr.takeError();
Bases.push_back(
- new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1.getSourceRange()),
- Base1.isVirtual(),
- Base1.isBaseOfClass(),
- Base1.getAccessSpecifierAsWritten(),
- Importer.Import(Base1.getTypeSourceInfo()),
- EllipsisLoc));
+ new (Importer.getToContext()) CXXBaseSpecifier(
+ *RangeOrErr,
+ Base1.isVirtual(),
+ Base1.isBaseOfClass(),
+ Base1.getAccessSpecifierAsWritten(),
+ *TSIOrErr,
+ EllipsisLoc));
}
if (!Bases.empty())
ToCXX->setBases(Bases.data(), Bases.size());
}
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
To->completeDefinition();
- return false;
+ return Error::success();
}
-bool ASTNodeImporter::ImportDefinition(VarDecl *From, VarDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportInitializer(VarDecl *From, VarDecl *To) {
if (To->getAnyInitializer())
- return false;
+ return Error::success();
- // FIXME: Can we really import any initializer? Alternatively, we could force
- // ourselves to import every declaration of a variable and then only use
- // getInit() here.
- To->setInit(Importer.Import(const_cast<Expr *>(From->getAnyInitializer())));
+ Expr *FromInit = From->getInit();
+ if (!FromInit)
+ return Error::success();
- // FIXME: Other bits to merge?
+ ExpectedExpr ToInitOrErr = import(FromInit);
+ if (!ToInitOrErr)
+ return ToInitOrErr.takeError();
+
+ To->setInit(*ToInitOrErr);
+ if (From->isInitKnownICE()) {
+ EvaluatedStmt *Eval = To->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = From->isInitICE();
+ }
- return false;
+ // FIXME: Other bits to merge?
+ return Error::success();
}
-bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ EnumDecl *From, EnumDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition() || To->isBeingDefined()) {
if (Kind == IDK_Everything)
- ImportDeclContext(From, /*ForceImport=*/true);
- return false;
+ return ImportDeclContext(From, /*ForceImport=*/true);
+ return Error::success();
}
To->startDefinition();
- setTypedefNameForAnonDecl(From, To, Importer);
+ if (Error Err = setTypedefNameForAnonDecl(From, To, Importer))
+ return Err;
- QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
- if (T.isNull())
- return true;
+ ExpectedType ToTypeOrErr =
+ import(Importer.getFromContext().getTypeDeclType(From));
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- QualType ToPromotionType = Importer.Import(From->getPromotionType());
- if (ToPromotionType.isNull())
- return true;
+ ExpectedType ToPromotionTypeOrErr = import(From->getPromotionType());
+ if (!ToPromotionTypeOrErr)
+ return ToPromotionTypeOrErr.takeError();
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
// FIXME: we might need to merge the number of positive or negative bits
// if the enumerator lists don't match.
- To->completeDefinition(T, ToPromotionType,
+ To->completeDefinition(*ToTypeOrErr, *ToPromotionTypeOrErr,
From->getNumPositiveBits(),
From->getNumNegativeBits());
- return false;
+ return Error::success();
}
-TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
- TemplateParameterList *Params) {
+// FIXME: Remove this, use `import` instead.
+Expected<TemplateParameterList *> ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
SmallVector<NamedDecl *, 4> ToParams(Params->size());
- if (ImportContainerChecked(*Params, ToParams))
- return nullptr;
+ if (Error Err = ImportContainerChecked(*Params, ToParams))
+ return std::move(Err);
Expr *ToRequiresClause;
if (Expr *const R = Params->getRequiresClause()) {
- ToRequiresClause = Importer.Import(R);
- if (!ToRequiresClause)
- return nullptr;
+ if (Error Err = importInto(ToRequiresClause, R))
+ return std::move(Err);
} else {
ToRequiresClause = nullptr;
}
- return TemplateParameterList::Create(Importer.getToContext(),
- Importer.Import(Params->getTemplateLoc()),
- Importer.Import(Params->getLAngleLoc()),
- ToParams,
- Importer.Import(Params->getRAngleLoc()),
- ToRequiresClause);
-}
-
-TemplateArgument
-ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
- switch (From.getKind()) {
- case TemplateArgument::Null:
- return TemplateArgument();
-
- case TemplateArgument::Type: {
- QualType ToType = Importer.Import(From.getAsType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(ToType);
- }
-
- case TemplateArgument::Integral: {
- QualType ToType = Importer.Import(From.getIntegralType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(From, ToType);
- }
-
- case TemplateArgument::Declaration: {
- auto *To = cast_or_null<ValueDecl>(Importer.Import(From.getAsDecl()));
- QualType ToType = Importer.Import(From.getParamTypeForDecl());
- if (!To || ToType.isNull())
- return {};
- return TemplateArgument(To, ToType);
- }
-
- case TemplateArgument::NullPtr: {
- QualType ToType = Importer.Import(From.getNullPtrType());
- if (ToType.isNull())
- return {};
- return TemplateArgument(ToType, /*isNullPtr*/true);
- }
-
- case TemplateArgument::Template: {
- TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
- if (ToTemplate.isNull())
- return {};
-
- return TemplateArgument(ToTemplate);
- }
-
- case TemplateArgument::TemplateExpansion: {
- TemplateName ToTemplate
- = Importer.Import(From.getAsTemplateOrTemplatePattern());
- if (ToTemplate.isNull())
- return {};
-
- return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
- }
-
- case TemplateArgument::Expression:
- if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
- return TemplateArgument(ToExpr);
- return TemplateArgument();
-
- case TemplateArgument::Pack: {
- SmallVector<TemplateArgument, 2> ToPack;
- ToPack.reserve(From.pack_size());
- if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
- return {};
-
- return TemplateArgument(
- llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
- }
- }
-
- llvm_unreachable("Invalid template argument kind");
-}
-
-Optional<TemplateArgumentLoc>
-ASTNodeImporter::ImportTemplateArgumentLoc(const TemplateArgumentLoc &TALoc) {
- TemplateArgument Arg = ImportTemplateArgument(TALoc.getArgument());
- TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo();
- TemplateArgumentLocInfo ToInfo;
- if (Arg.getKind() == TemplateArgument::Expression) {
- Expr *E = Importer.Import(FromInfo.getAsExpr());
- ToInfo = TemplateArgumentLocInfo(E);
- if (!E)
- return None;
- } else if (Arg.getKind() == TemplateArgument::Type) {
- if (TypeSourceInfo *TSI = Importer.Import(FromInfo.getAsTypeSourceInfo()))
- ToInfo = TemplateArgumentLocInfo(TSI);
+ auto ToTemplateLocOrErr = import(Params->getTemplateLoc());
+ if (!ToTemplateLocOrErr)
+ return ToTemplateLocOrErr.takeError();
+ auto ToLAngleLocOrErr = import(Params->getLAngleLoc());
+ if (!ToLAngleLocOrErr)
+ return ToLAngleLocOrErr.takeError();
+ auto ToRAngleLocOrErr = import(Params->getRAngleLoc());
+ if (!ToRAngleLocOrErr)
+ return ToRAngleLocOrErr.takeError();
+
+ return TemplateParameterList::Create(
+ Importer.getToContext(),
+ *ToTemplateLocOrErr,
+ *ToLAngleLocOrErr,
+ ToParams,
+ *ToRAngleLocOrErr,
+ ToRequiresClause);
+}
+
+Error ASTNodeImporter::ImportTemplateArguments(
+ const TemplateArgument *FromArgs, unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ if (auto ToOrErr = import(FromArgs[I]))
+ ToArgs.push_back(*ToOrErr);
else
- return None;
- } else {
- ToInfo = TemplateArgumentLocInfo(
- Importer.Import(FromInfo.getTemplateQualifierLoc()),
- Importer.Import(FromInfo.getTemplateNameLoc()),
- Importer.Import(FromInfo.getTemplateEllipsisLoc()));
+ return ToOrErr.takeError();
}
- return TemplateArgumentLoc(Arg, ToInfo);
-}
-
-bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
- unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs) {
- for (unsigned I = 0; I != NumFromArgs; ++I) {
- TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
- if (To.isNull() && !FromArgs[I].isNull())
- return true;
- ToArgs.push_back(To);
- }
+ return Error::success();
+}
- return false;
+// FIXME: Do not forget to remove this and use only 'import'.
+Expected<TemplateArgument>
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ return import(From);
}
-// We cannot use Optional<> pattern here and below because
-// TemplateArgumentListInfo's operator new is declared as deleted so it cannot
-// be stored in Optional.
template <typename InContainerTy>
-bool ASTNodeImporter::ImportTemplateArgumentListInfo(
+Error ASTNodeImporter::ImportTemplateArgumentListInfo(
const InContainerTy &Container, TemplateArgumentListInfo &ToTAInfo) {
for (const auto &FromLoc : Container) {
- if (auto ToLoc = ImportTemplateArgumentLoc(FromLoc))
- ToTAInfo.addArgument(*ToLoc);
+ if (auto ToLocOrErr = import(FromLoc))
+ ToTAInfo.addArgument(*ToLocOrErr);
else
- return true;
+ return ToLocOrErr.takeError();
}
- return false;
+ return Error::success();
}
static StructuralEquivalenceKind
@@ -1682,30 +2062,31 @@ bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
return Ctx.IsEquivalent(From, To);
}
-Decl *ASTNodeImporter::VisitDecl(Decl *D) {
+ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Decl *ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
- // Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+}
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
+ // Import the context of this declaration.
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
+ ExpectedSLoc LocOrErr = import(D->getLocation());
+ if (!LocOrErr)
+ return LocOrErr.takeError();
EmptyDecl *ToD;
- if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, Loc))
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, *LocOrErr))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1713,7 +2094,7 @@ Decl *ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
TranslationUnitDecl *ToD =
Importer.getToContext().getTranslationUnitDecl();
@@ -1722,18 +2103,23 @@ Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
- SourceLocation Loc = Importer.Import(D->getLocation());
- SourceLocation ColonLoc = Importer.Import(D->getColonLoc());
+ExpectedDecl ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ ExpectedSLoc LocOrErr = import(D->getLocation());
+ if (!LocOrErr)
+ return LocOrErr.takeError();
+ auto ColonLocOrErr = import(D->getColonLoc());
+ if (!ColonLocOrErr)
+ return ColonLocOrErr.takeError();
// Import the context of this declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ auto DCOrErr = Importer.ImportContext(D->getDeclContext());
+ if (!DCOrErr)
+ return DCOrErr.takeError();
+ DeclContext *DC = *DCOrErr;
AccessSpecDecl *ToD;
if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), D->getAccess(),
- DC, Loc, ColonLoc))
+ DC, *LocOrErr, *ColonLocOrErr))
return ToD;
// Lexical DeclContext and Semantic DeclContext
@@ -1744,29 +2130,26 @@ Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
-
+ExpectedDecl ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ auto DCOrErr = Importer.ImportContext(D->getDeclContext());
+ if (!DCOrErr)
+ return DCOrErr.takeError();
+ DeclContext *DC = *DCOrErr;
DeclContext *LexicalDC = DC;
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- Expr *AssertExpr = Importer.Import(D->getAssertExpr());
- if (!AssertExpr)
- return nullptr;
-
- StringLiteral *FromMsg = D->getMessage();
- auto *ToMsg = cast_or_null<StringLiteral>(Importer.Import(FromMsg));
- if (!ToMsg && FromMsg)
- return nullptr;
+ SourceLocation ToLocation, ToRParenLoc;
+ Expr *ToAssertExpr;
+ StringLiteral *ToMessage;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAssertExpr(), D->getMessage(), D->getRParenLoc()))
+ std::tie(ToLocation, ToAssertExpr, ToMessage, ToRParenLoc) = *Imp;
+ else
+ return Imp.takeError();
StaticAssertDecl *ToD;
if (GetImportedOrCreateDecl(
- ToD, D, Importer.getToContext(), DC, Loc, AssertExpr, ToMsg,
- Importer.Import(D->getRParenLoc()), D->isFailed()))
+ ToD, D, Importer.getToContext(), DC, ToLocation, ToAssertExpr, ToMessage,
+ ToRParenLoc, D->isFailed()))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1774,14 +2157,14 @@ Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -1796,8 +2179,7 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
} else {
SmallVector<NamedDecl *, 4> ConflictingDecls;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Namespace))
continue;
@@ -1815,15 +2197,21 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
// Create the "to" namespace, if needed.
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
if (GetImportedOrCreateDecl(
ToNamespace, D, Importer.getToContext(), DC, D->isInline(),
- Importer.Import(D->getLocStart()), Loc, Name.getAsIdentifierInfo(),
+ *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
/*PrevDecl=*/nullptr))
return ToNamespace;
ToNamespace->setLexicalDeclContext(LexicalDC);
@@ -1840,43 +2228,42 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
}
Importer.MapImported(D, ToNamespace);
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
return ToNamespace;
}
-Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *LookupD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, LookupD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, LookupD, Loc))
+ return std::move(Err);
if (LookupD)
return LookupD;
// NOTE: No conflict resolution is done for namespace aliases now.
- auto *TargetDecl = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNamespace()));
- if (!TargetDecl)
- return nullptr;
-
- IdentifierInfo *ToII = Importer.Import(D->getIdentifier());
- if (!ToII)
- return nullptr;
-
- NestedNameSpecifierLoc ToQLoc = Importer.Import(D->getQualifierLoc());
- if (D->getQualifierLoc() && !ToQLoc)
- return nullptr;
+ SourceLocation ToNamespaceLoc, ToAliasLoc, ToTargetNameLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ NamespaceDecl *ToNamespace;
+ if (auto Imp = importSeq(
+ D->getNamespaceLoc(), D->getAliasLoc(), D->getQualifierLoc(),
+ D->getTargetNameLoc(), D->getNamespace()))
+ std::tie(
+ ToNamespaceLoc, ToAliasLoc, ToQualifierLoc, ToTargetNameLoc,
+ ToNamespace) = *Imp;
+ else
+ return Imp.takeError();
+ IdentifierInfo *ToIdentifier = Importer.Import(D->getIdentifier());
NamespaceAliasDecl *ToD;
- if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC,
- Importer.Import(D->getNamespaceLoc()),
- Importer.Import(D->getAliasLoc()), ToII, ToQLoc,
- Importer.Import(D->getTargetNameLoc()),
- TargetDecl))
+ if (GetImportedOrCreateDecl(
+ ToD, D, Importer.getToContext(), DC, ToNamespaceLoc, ToAliasLoc,
+ ToIdentifier, ToQualifierLoc, ToTargetNameLoc, ToNamespace))
return ToD;
ToD->setLexicalDeclContext(LexicalDC);
@@ -1885,14 +2272,15 @@ Decl *ASTNodeImporter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
return ToD;
}
-Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
+ExpectedDecl
+ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -1902,15 +2290,21 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
if (auto *FoundTypedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
- if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
- FoundTypedef->getUnderlyingType()))
- return Importer.MapImported(D, FoundTypedef);
+ QualType FromUT = D->getUnderlyingType();
+ QualType FoundUT = FoundTypedef->getUnderlyingType();
+ if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) {
+ // If the "From" context has a complete underlying type but we
+ // already have a complete underlying type then return with that.
+ if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType())
+ return Importer.MapImported(D, FoundTypedef);
+ }
+ // FIXME Handle redecl chain.
+ break;
}
ConflictingDecls.push_back(FoundDecl);
@@ -1921,28 +2315,30 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the underlying type of this typedef;
- QualType T = Importer.Import(D->getUnderlyingType());
- if (T.isNull())
- return nullptr;
+ QualType ToUnderlyingType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToBeginLoc;
+ if (auto Imp = importSeq(
+ D->getUnderlyingType(), D->getTypeSourceInfo(), D->getBeginLoc()))
+ std::tie(ToUnderlyingType, ToTypeSourceInfo, ToBeginLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the new typedef node.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- SourceLocation StartL = Importer.Import(D->getLocStart());
-
+ // FIXME: ToUnderlyingType is not used.
TypedefNameDecl *ToTypedef;
if (IsAlias) {
if (GetImportedOrCreateDecl<TypeAliasDecl>(
- ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
- Name.getAsIdentifierInfo(), TInfo))
+ ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc,
+ Name.getAsIdentifierInfo(), ToTypeSourceInfo))
return ToTypedef;
} else if (GetImportedOrCreateDecl<TypedefDecl>(
- ToTypedef, D, Importer.getToContext(), DC, StartL, Loc,
- Name.getAsIdentifierInfo(), TInfo))
+ ToTypedef, D, Importer.getToContext(), DC, ToBeginLoc, Loc,
+ Name.getAsIdentifierInfo(), ToTypeSourceInfo))
return ToTypedef;
ToTypedef->setAccess(D->getAccess());
@@ -1956,22 +2352,23 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
return ToTypedef;
}
-Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/false);
}
-Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
return VisitTypedefNameDecl(D, /*IsAlias=*/true);
}
-Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *FoundD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, FoundD, Loc))
+ return std::move(Err);
if (FoundD)
return FoundD;
@@ -1981,8 +2378,7 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
@@ -1996,26 +2392,23 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- TemplateParameterList *Params = ImportTemplateParameterList(
- D->getTemplateParameters());
- if (!Params)
- return nullptr;
-
- auto *TemplDecl = cast_or_null<TypeAliasDecl>(
- Importer.Import(D->getTemplatedDecl()));
- if (!TemplDecl)
- return nullptr;
+ TemplateParameterList *ToTemplateParameters;
+ TypeAliasDecl *ToTemplatedDecl;
+ if (auto Imp = importSeq(D->getTemplateParameters(), D->getTemplatedDecl()))
+ std::tie(ToTemplateParameters, ToTemplatedDecl) = *Imp;
+ else
+ return Imp.takeError();
TypeAliasTemplateDecl *ToAlias;
if (GetImportedOrCreateDecl(ToAlias, D, Importer.getToContext(), DC, Loc,
- Name, Params, TemplDecl))
+ Name, ToTemplateParameters, ToTemplatedDecl))
return ToAlias;
- TemplDecl->setDescribedAliasTemplate(ToAlias);
+ ToTemplatedDecl->setDescribedAliasTemplate(ToAlias);
ToAlias->setAccess(D->getAccess());
ToAlias->setLexicalDeclContext(LexicalDC);
@@ -2023,48 +2416,53 @@ Decl *ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
return ToAlias;
}
-Decl *ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitLabelDecl(LabelDecl *D) {
// Import the major distinguishing characteristics of this label.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
assert(LexicalDC->isFunctionOrMethod());
LabelDecl *ToLabel;
- if (D->isGnuLocal()
- ? GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo(),
- Importer.Import(D->getLocStart()))
- : GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()),
- Name.getAsIdentifierInfo()))
- return ToLabel;
-
- auto *Label = cast_or_null<LabelStmt>(Importer.Import(D->getStmt()));
- if (!Label)
- return nullptr;
+ if (D->isGnuLocal()) {
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+ if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(), *BeginLocOrErr))
+ return ToLabel;
+
+ } else {
+ if (GetImportedOrCreateDecl(ToLabel, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo()))
+ return ToLabel;
+
+ }
- ToLabel->setStmt(Label);
+ Expected<LabelStmt *> ToStmtOrErr = import(D->getStmt());
+ if (!ToStmtOrErr)
+ return ToStmtOrErr.takeError();
+
+ ToLabel->setStmt(*ToStmtOrErr);
ToLabel->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToLabel);
return ToLabel;
}
-Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// Import the major distinguishing characteristics of this enum.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2072,7 +2470,9 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
- SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ if (Error Err = importInto(
+ SearchName, D->getTypedefNameForAnonDecl()->getDeclName()))
+ return std::move(Err);
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary;
@@ -2080,19 +2480,18 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
// We may already have an enum of the same name; try to find and match it.
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
+ auto FoundDecls =
+ Importer.findDeclsInToCtx(DC, SearchName);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- Decl *Found = FoundDecl;
- if (auto *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (auto *Typedef = dyn_cast<TypedefNameDecl>(FoundDecl)) {
if (const auto *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
- Found = Tag->getDecl();
+ FoundDecl = Tag->getDecl();
}
- if (auto *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) {
if (IsStructuralMatch(D, FoundEnum))
return Importer.MapImported(D, FoundEnum);
}
@@ -2104,53 +2503,69 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
+ SourceLocation ToBeginLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ QualType ToIntegerType;
+ if (auto Imp = importSeq(
+ D->getBeginLoc(), D->getQualifierLoc(), D->getIntegerType()))
+ std::tie(ToBeginLoc, ToQualifierLoc, ToIntegerType) = *Imp;
+ else
+ return Imp.takeError();
+
// Create the enum declaration.
EnumDecl *D2;
if (GetImportedOrCreateDecl(
- D2, D, Importer.getToContext(), DC, Importer.Import(D->getLocStart()),
+ D2, D, Importer.getToContext(), DC, ToBeginLoc,
Loc, Name.getAsIdentifierInfo(), nullptr, D->isScoped(),
D->isScopedUsingClassTag(), D->isFixed()))
return D2;
- // Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setQualifierInfo(ToQualifierLoc);
+ D2->setIntegerType(ToIntegerType);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
- // Import the integer type.
- QualType ToIntegerType = Importer.Import(D->getIntegerType());
- if (ToIntegerType.isNull())
- return nullptr;
- D2->setIntegerType(ToIntegerType);
-
// Import the definition
- if (D->isCompleteDefinition() && ImportDefinition(D, D2))
- return nullptr;
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2))
+ return std::move(Err);
return D2;
}
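// Illustrative sketch, not part of the patch: the source pattern behind the
// typedef-name fallback in the lookup above. The first enum is anonymous and
// is only reachable through 'Color', which lives in the ordinary identifier
// namespace rather than the tag namespace.
typedef enum { Red, Green, Blue } Color; // anonymous enum named via a typedef
enum class Mode : unsigned { Off, On };  // scoped enum with a fixed underlying
                                         // type (isScoped()/isFixed() above)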
-Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ bool IsFriendTemplate = false;
+ if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
+ IsFriendTemplate =
+ DCXX->getDescribedClassTemplate() &&
+ DCXX->getDescribedClassTemplate()->getFriendObjectKind() !=
+ Decl::FOK_None;
+ }
+
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
if (Definition && Definition != D &&
+ // A friend template declaration must be imported on its own.
+ !IsFriendTemplate &&
// In contrast to a normal CXXRecordDecl, the implicit
// CXXRecordDecl of ClassTemplateSpecializationDecl is its redeclaration.
// The definition of the implicit CXXRecordDecl in this case is the
// ClassTemplateSpecializationDecl itself. Thus, we start with an extra
// condition in order to be able to import the implicit Decl.
!D->isImplicit()) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
+ ExpectedDecl ImportedDefOrErr = import(Definition);
+ if (!ImportedDefOrErr)
+ return ImportedDefOrErr.takeError();
- return Importer.MapImported(D, ImportedDef);
+ return Importer.MapImported(D, *ImportedDefOrErr);
}
// Import the major distinguishing characteristics of this record.
@@ -2158,8 +2573,8 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2167,19 +2582,19 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
unsigned IDNS = Decl::IDNS_Tag;
DeclarationName SearchName = Name;
if (!SearchName && D->getTypedefNameForAnonDecl()) {
- SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ if (Error Err = importInto(
+ SearchName, D->getTypedefNameForAnonDecl()->getDeclName()))
+ return std::move(Err);
IDNS = Decl::IDNS_Ordinary;
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
- IDNS |= Decl::IDNS_Ordinary;
+ IDNS |= Decl::IDNS_Ordinary | Decl::IDNS_TagFriend;
// We may already have a record of the same name; try to find and match it.
- RecordDecl *AdoptDecl = nullptr;
RecordDecl *PrevDecl = nullptr;
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
-
+ auto FoundDecls =
+ Importer.findDeclsInToCtx(DC, SearchName);
if (!FoundDecls.empty()) {
// We're going to have to compare D against potentially conflicting Decls, so complete it.
if (D->hasExternalLexicalStorage() && !D->isCompleteDefinition())
@@ -2196,30 +2611,23 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Found = Tag->getDecl();
}
- if (D->getDescribedTemplate()) {
- if (auto *Template = dyn_cast<ClassTemplateDecl>(Found))
- Found = Template->getTemplatedDecl();
- else
- continue;
- }
-
if (auto *FoundRecord = dyn_cast<RecordDecl>(Found)) {
- if (!SearchName) {
+ // Do not emit a false-positive diagnostic for unnamed structs/unions or
+ // anonymous structs: it would be a false positive because a class may
+ // contain several anonymous/unnamed structs.
+ // E.g. these are both valid:
+ // struct A { // unnamed structs
+ // struct { struct A *next; } entry0;
+ // struct { struct A *next; } entry1;
+ // };
+ // struct X { struct { int a; }; struct { int b; }; }; // anon structs
+ if (!SearchName)
if (!IsStructuralMatch(D, FoundRecord, false))
continue;
- }
- PrevDecl = FoundRecord;
-
- if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
- if ((SearchName && !D->isCompleteDefinition())
- || (D->isCompleteDefinition() &&
- D->isAnonymousStructOrUnion()
- == FoundDef->isAnonymousStructOrUnion() &&
- IsStructuralMatch(D, FoundDef))) {
- // The record types structurally match, or the "from" translation
- // unit only had a forward declaration anyway; call it the same
- // function.
+ if (IsStructuralMatch(D, FoundRecord)) {
+ RecordDecl *FoundDef = FoundRecord->getDefinition();
+ if (D->isThisDeclarationADefinition() && FoundDef) {
// FIXME: Structural equivalence check should check for same
// user-defined methods.
Importer.MapImported(D, FoundDef);
@@ -2227,178 +2635,178 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
auto *FoundCXX = dyn_cast<CXXRecordDecl>(FoundDef);
assert(FoundCXX && "Record type mismatch");
- if (D->isCompleteDefinition() && !Importer.isMinimalImport())
+ if (!Importer.isMinimalImport())
// FoundDef may not have every implicit method that D has
// because implicit methods are created only if they are used.
- ImportImplicitMethods(DCXX, FoundCXX);
+ if (Error Err = ImportImplicitMethods(DCXX, FoundCXX))
+ return std::move(Err);
}
- return FoundDef;
}
- } else if (!D->isCompleteDefinition()) {
- // We have a forward declaration of this type, so adopt that forward
- // declaration rather than building a new one.
-
- // If one or both can be completed from external storage then try one
- // last time to complete and compare them before doing this.
-
- if (FoundRecord->hasExternalLexicalStorage() &&
- !FoundRecord->isCompleteDefinition())
- FoundRecord->getASTContext().getExternalSource()->CompleteType(FoundRecord);
- if (D->hasExternalLexicalStorage())
- D->getASTContext().getExternalSource()->CompleteType(D);
-
- if (FoundRecord->isCompleteDefinition() &&
- D->isCompleteDefinition() &&
- !IsStructuralMatch(D, FoundRecord))
- continue;
-
- AdoptDecl = FoundRecord;
- continue;
- } else if (!SearchName) {
- continue;
+ PrevDecl = FoundRecord->getMostRecentDecl();
+ break;
}
}
ConflictingDecls.push_back(FoundDecl);
- }
+ } // for
if (!ConflictingDecls.empty() && SearchName) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
+ if (!Name)
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
// Create the record declaration.
- RecordDecl *D2 = AdoptDecl;
- SourceLocation StartLoc = Importer.Import(D->getLocStart());
- if (!D2) {
- CXXRecordDecl *D2CXX = nullptr;
- if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
- if (DCXX->isLambda()) {
- TypeSourceInfo *TInfo = Importer.Import(DCXX->getLambdaTypeInfo());
- if (GetImportedOrCreateSpecialDecl(
- D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
- DC, TInfo, Loc, DCXX->isDependentLambda(),
- DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
- return D2CXX;
- Decl *CDecl = Importer.Import(DCXX->getLambdaContextDecl());
- if (DCXX->getLambdaContextDecl() && !CDecl)
- return nullptr;
- D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), CDecl);
- } else if (DCXX->isInjectedClassName()) {
- // We have to be careful to do a similar dance to the one in
- // Sema::ActOnStartCXXMemberDeclarations
- CXXRecordDecl *const PrevDecl = nullptr;
- const bool DelayTypeCreation = true;
- if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
- Name.getAsIdentifierInfo(), PrevDecl,
- DelayTypeCreation))
- return D2CXX;
- Importer.getToContext().getTypeDeclType(
- D2CXX, dyn_cast<CXXRecordDecl>(DC));
- } else {
- if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
- Name.getAsIdentifierInfo(),
- cast_or_null<CXXRecordDecl>(PrevDecl)))
- return D2CXX;
- }
+ RecordDecl *D2 = nullptr;
+ CXXRecordDecl *D2CXX = nullptr;
+ if (auto *DCXX = dyn_cast<CXXRecordDecl>(D)) {
+ if (DCXX->isLambda()) {
+ auto TInfoOrErr = import(DCXX->getLambdaTypeInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
+ if (GetImportedOrCreateSpecialDecl(
+ D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
+ DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(),
+ DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
+ return D2CXX;
+ ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl());
+ if (!CDeclOrErr)
+ return CDeclOrErr.takeError();
+ D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr);
+ } else if (DCXX->isInjectedClassName()) {
+ // We have to be careful to do a similar dance to the one in
+ // Sema::ActOnStartCXXMemberDeclarations
+ const bool DelayTypeCreation = true;
+ if (GetImportedOrCreateDecl(
+ D2CXX, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
+ cast_or_null<CXXRecordDecl>(PrevDecl), DelayTypeCreation))
+ return D2CXX;
+ Importer.getToContext().getTypeDeclType(
+ D2CXX, dyn_cast<CXXRecordDecl>(DC));
+ } else {
+ if (GetImportedOrCreateDecl(D2CXX, D, Importer.getToContext(),
+ D->getTagKind(), DC, *BeginLocOrErr, Loc,
+ Name.getAsIdentifierInfo(),
+ cast_or_null<CXXRecordDecl>(PrevDecl)))
+ return D2CXX;
+ }
- D2 = D2CXX;
- D2->setAccess(D->getAccess());
- D2->setLexicalDeclContext(LexicalDC);
- if (!DCXX->getDescribedClassTemplate() || DCXX->isImplicit())
- LexicalDC->addDeclInternal(D2);
-
- if (ClassTemplateDecl *FromDescribed =
- DCXX->getDescribedClassTemplate()) {
- auto *ToDescribed = cast_or_null<ClassTemplateDecl>(
- Importer.Import(FromDescribed));
- if (!ToDescribed)
- return nullptr;
- D2CXX->setDescribedClassTemplate(ToDescribed);
- if (!DCXX->isInjectedClassName()) {
- // In a record describing a template the type should be an
- // InjectedClassNameType (see Sema::CheckClassTemplate). Update the
- // previously set type to the correct value here (ToDescribed is not
- // available at record create).
- // FIXME: The previous type is cleared but not removed from
- // ASTContext's internal storage.
- CXXRecordDecl *Injected = nullptr;
- for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
- auto *Record = dyn_cast<CXXRecordDecl>(Found);
- if (Record && Record->isInjectedClassName()) {
- Injected = Record;
- break;
- }
- }
- D2CXX->setTypeForDecl(nullptr);
- Importer.getToContext().getInjectedClassNameType(D2CXX,
- ToDescribed->getInjectedClassNameSpecialization());
- if (Injected) {
- Injected->setTypeForDecl(nullptr);
- Importer.getToContext().getTypeDeclType(Injected, D2CXX);
+ D2 = D2CXX;
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ if (!DCXX->getDescribedClassTemplate() || DCXX->isImplicit())
+ LexicalDC->addDeclInternal(D2);
+
+ if (LexicalDC != DC && D->isInIdentifierNamespace(Decl::IDNS_TagFriend))
+ DC->makeDeclVisibleInContext(D2);
+
+ if (ClassTemplateDecl *FromDescribed =
+ DCXX->getDescribedClassTemplate()) {
+ ClassTemplateDecl *ToDescribed;
+ if (Error Err = importInto(ToDescribed, FromDescribed))
+ return std::move(Err);
+ D2CXX->setDescribedClassTemplate(ToDescribed);
+ if (!DCXX->isInjectedClassName() && !IsFriendTemplate) {
+ // In a record describing a template the type should be an
+ // InjectedClassNameType (see Sema::CheckClassTemplate). Update the
+ // previously set type to the correct value here (ToDescribed is not
+ // available at record create).
+ // FIXME: The previous type is cleared but not removed from
+ // ASTContext's internal storage.
+ CXXRecordDecl *Injected = nullptr;
+ for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
+ auto *Record = dyn_cast<CXXRecordDecl>(Found);
+ if (Record && Record->isInjectedClassName()) {
+ Injected = Record;
+ break;
}
}
- } else if (MemberSpecializationInfo *MemberInfo =
+ // Create an injected type for the whole redecl chain.
+ SmallVector<Decl *, 2> Redecls =
+ getCanonicalForwardRedeclChain(D2CXX);
+ for (auto *R : Redecls) {
+ auto *RI = cast<CXXRecordDecl>(R);
+ RI->setTypeForDecl(nullptr);
+ // Below we create a new injected type and assign that to the
+ // canonical decl; subsequent declarations in the chain will reuse
+ // that type.
+ Importer.getToContext().getInjectedClassNameType(
+ RI, ToDescribed->getInjectedClassNameSpecialization());
+ }
+ // Set the new type for the previous injected decl too.
+ if (Injected) {
+ Injected->setTypeForDecl(nullptr);
+ Importer.getToContext().getTypeDeclType(Injected, D2CXX);
+ }
+ }
+ } else if (MemberSpecializationInfo *MemberInfo =
DCXX->getMemberSpecializationInfo()) {
TemplateSpecializationKind SK =
MemberInfo->getTemplateSpecializationKind();
CXXRecordDecl *FromInst = DCXX->getInstantiatedFromMemberClass();
- auto *ToInst =
- cast_or_null<CXXRecordDecl>(Importer.Import(FromInst));
- if (FromInst && !ToInst)
- return nullptr;
- D2CXX->setInstantiationOfMemberClass(ToInst, SK);
- D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation(
- Importer.Import(MemberInfo->getPointOfInstantiation()));
- }
- } else {
- if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(),
- D->getTagKind(), DC, StartLoc, Loc,
- Name.getAsIdentifierInfo(), PrevDecl))
- return D2;
- D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+
+ if (Expected<CXXRecordDecl *> ToInstOrErr = import(FromInst))
+ D2CXX->setInstantiationOfMemberClass(*ToInstOrErr, SK);
+ else
+ return ToInstOrErr.takeError();
+
+ if (ExpectedSLoc POIOrErr =
+ import(MemberInfo->getPointOfInstantiation()))
+ D2CXX->getMemberSpecializationInfo()->setPointOfInstantiation(
+ *POIOrErr);
+ else
+ return POIOrErr.takeError();
}
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
- if (D->isAnonymousStructOrUnion())
- D2->setAnonymousStructOrUnion(true);
+ } else {
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(),
+ D->getTagKind(), DC, *BeginLocOrErr, Loc,
+ Name.getAsIdentifierInfo(), PrevDecl))
+ return D2;
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
}
- Importer.MapImported(D, D2);
+ if (auto QualifierLocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*QualifierLocOrErr);
+ else
+ return QualifierLocOrErr.takeError();
- if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
- return nullptr;
+ if (D->isAnonymousStructOrUnion())
+ D2->setAnonymousStructOrUnion(true);
+
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2, IDK_Default))
+ return std::move(Err);
return D2;
}
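// Illustrative sketch, not part of the patch: what the injected-class-name
// handling above models at the source level. Inside a class template, the
// template's own name denotes the current specialization, and that is the
// InjectedClassNameType recreated for every declaration in the redecl chain.
template <typename T> struct List {
  List *Next;    // 'List' is the injected class name, i.e. List<T>.
  List<T> *Prev; // Equivalent spelling with explicit template arguments.
};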
-Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
// Import the major distinguishing characteristics of this enumerator.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
// Determine whether there are any other declarations with the same name and
// in the same context.
if (!LexicalDC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
@@ -2416,18 +2824,22 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- Expr *Init = Importer.Import(D->getInitExpr());
- if (D->getInitExpr() && !Init)
- return nullptr;
+ ExpectedType TypeOrErr = import(D->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
+
+ ExpectedExpr InitOrErr = import(D->getInitExpr());
+ if (!InitOrErr)
+ return InitOrErr.takeError();
EnumConstantDecl *ToEnumerator;
if (GetImportedOrCreateDecl(
ToEnumerator, D, Importer.getToContext(), cast<EnumDecl>(DC), Loc,
- Name.getAsIdentifierInfo(), T, Init, D->getInitVal()))
+ Name.getAsIdentifierInfo(), *TypeOrErr, *InitOrErr, D->getInitVal()))
return ToEnumerator;
ToEnumerator->setAccess(D->getAccess());
@@ -2436,52 +2848,57 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
return ToEnumerator;
}
-bool ASTNodeImporter::ImportTemplateInformation(FunctionDecl *FromFD,
- FunctionDecl *ToFD) {
+Error ASTNodeImporter::ImportTemplateInformation(
+ FunctionDecl *FromFD, FunctionDecl *ToFD) {
switch (FromFD->getTemplatedKind()) {
case FunctionDecl::TK_NonTemplate:
case FunctionDecl::TK_FunctionTemplate:
- return false;
+ return Error::success();
case FunctionDecl::TK_MemberSpecialization: {
- auto *InstFD = cast_or_null<FunctionDecl>(
- Importer.Import(FromFD->getInstantiatedFromMemberFunction()));
- if (!InstFD)
- return true;
-
TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind();
- SourceLocation POI = Importer.Import(
- FromFD->getMemberSpecializationInfo()->getPointOfInstantiation());
- ToFD->setInstantiationOfMemberFunction(InstFD, TSK);
- ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
- return false;
+
+ if (Expected<FunctionDecl *> InstFDOrErr =
+ import(FromFD->getInstantiatedFromMemberFunction()))
+ ToFD->setInstantiationOfMemberFunction(*InstFDOrErr, TSK);
+ else
+ return InstFDOrErr.takeError();
+
+ if (ExpectedSLoc POIOrErr = import(
+ FromFD->getMemberSpecializationInfo()->getPointOfInstantiation()))
+ ToFD->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+
+ return Error::success();
}
case FunctionDecl::TK_FunctionTemplateSpecialization: {
- FunctionTemplateDecl* Template;
- OptionalTemplateArgsTy ToTemplArgs;
- std::tie(Template, ToTemplArgs) =
+ auto FunctionAndArgsOrErr =
ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
- if (!Template || !ToTemplArgs)
- return true;
+ if (!FunctionAndArgsOrErr)
+ return FunctionAndArgsOrErr.takeError();
TemplateArgumentList *ToTAList = TemplateArgumentList::CreateCopy(
- Importer.getToContext(), *ToTemplArgs);
+ Importer.getToContext(), std::get<1>(*FunctionAndArgsOrErr));
auto *FTSInfo = FromFD->getTemplateSpecializationInfo();
TemplateArgumentListInfo ToTAInfo;
const auto *FromTAArgsAsWritten = FTSInfo->TemplateArgumentsAsWritten;
if (FromTAArgsAsWritten)
- if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo))
- return true;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ *FromTAArgsAsWritten, ToTAInfo))
+ return Err;
- SourceLocation POI = Importer.Import(FTSInfo->getPointOfInstantiation());
+ ExpectedSLoc POIOrErr = import(FTSInfo->getPointOfInstantiation());
+ if (!POIOrErr)
+ return POIOrErr.takeError();
TemplateSpecializationKind TSK = FTSInfo->getTemplateSpecializationKind();
ToFD->setFunctionTemplateSpecialization(
- Template, ToTAList, /* InsertPos= */ nullptr,
- TSK, FromTAArgsAsWritten ? &ToTAInfo : nullptr, POI);
- return false;
+ std::get<0>(*FunctionAndArgsOrErr), ToTAList, /* InsertPos= */ nullptr,
+ TSK, FromTAArgsAsWritten ? &ToTAInfo : nullptr, *POIOrErr);
+ return Error::success();
}
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
@@ -2489,53 +2906,56 @@ bool ASTNodeImporter::ImportTemplateInformation(FunctionDecl *FromFD,
UnresolvedSet<8> TemplDecls;
unsigned NumTemplates = FromInfo->getNumTemplates();
for (unsigned I = 0; I < NumTemplates; I++) {
- if (auto *ToFTD = cast_or_null<FunctionTemplateDecl>(
- Importer.Import(FromInfo->getTemplate(I))))
- TemplDecls.addDecl(ToFTD);
+ if (Expected<FunctionTemplateDecl *> ToFTDOrErr =
+ import(FromInfo->getTemplate(I)))
+ TemplDecls.addDecl(*ToFTDOrErr);
else
- return true;
+ return ToFTDOrErr.takeError();
}
// Import TemplateArgumentListInfo.
TemplateArgumentListInfo ToTAInfo;
- if (ImportTemplateArgumentListInfo(
- FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
- llvm::makeArrayRef(FromInfo->getTemplateArgs(),
- FromInfo->getNumTemplateArgs()),
- ToTAInfo))
- return true;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
+ llvm::makeArrayRef(
+ FromInfo->getTemplateArgs(), FromInfo->getNumTemplateArgs()),
+ ToTAInfo))
+ return Err;
ToFD->setDependentTemplateSpecialization(Importer.getToContext(),
TemplDecls, ToTAInfo);
- return false;
+ return Error::success();
}
}
llvm_unreachable("All cases should be covered!");
}
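// Illustrative sketch, not part of the patch: source-level examples of the
// templated kinds dispatched on above (roughly).
template <typename T> struct Holder {
  void reset(); // Holder<int>::reset() is a TK_MemberSpecialization.
};
template <typename T> void swapValues(T &, T &); // TK_FunctionTemplate
template <> void swapValues<int>(int &, int &);  // TK_FunctionTemplateSpecialization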
-FunctionDecl *
+Expected<FunctionDecl *>
ASTNodeImporter::FindFunctionTemplateSpecialization(FunctionDecl *FromFD) {
- FunctionTemplateDecl* Template;
- OptionalTemplateArgsTy ToTemplArgs;
- std::tie(Template, ToTemplArgs) =
+ auto FunctionAndArgsOrErr =
ImportFunctionTemplateWithTemplateArgsFromSpecialization(FromFD);
- if (!Template || !ToTemplArgs)
- return nullptr;
+ if (!FunctionAndArgsOrErr)
+ return FunctionAndArgsOrErr.takeError();
+ FunctionTemplateDecl *Template;
+ TemplateArgsTy ToTemplArgs;
+ std::tie(Template, ToTemplArgs) = *FunctionAndArgsOrErr;
void *InsertPos = nullptr;
- auto *FoundSpec = Template->findSpecialization(*ToTemplArgs, InsertPos);
+ auto *FoundSpec = Template->findSpecialization(ToTemplArgs, InsertPos);
return FoundSpec;
}
-Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
- SmallVector<Decl*, 2> Redecls = getCanonicalForwardRedeclChain(D);
+ SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D);
auto RedeclIt = Redecls.begin();
// Import the first part of the decl chain. I.e. import all previous
// declarations starting from the canonical decl.
- for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) {
+ ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
+ if (!ToRedeclOrErr)
+ return ToRedeclOrErr.takeError();
+ }
assert(*RedeclIt == D);
// Import the major distinguishing characteristics of this function.
@@ -2543,8 +2963,8 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -2552,16 +2972,18 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
FunctionTemplateDecl *FromFT = D->getDescribedFunctionTemplate();
// If this is a function template specialization, then try to find the same
- // existing specialization in the "to" context. The localUncachedLookup
- // below will not find any specialization, but would find the primary
- // template; thus, we have to skip normal lookup in case of specializations.
+ // existing specialization in the "to" context. The lookup below will not
+ // find any specialization, but would find the primary template; thus, we
+ // have to skip normal lookup in case of specializations.
// FIXME handle member function templates (TK_MemberSpecialization) similarly?
if (D->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- if (FunctionDecl *FoundFunction = FindFunctionTemplateSpecialization(D)) {
- if (D->doesThisDeclarationHaveABody() &&
- FoundFunction->hasBody())
- return Importer.Imported(D, FoundFunction);
+ auto FoundFunctionOrErr = FindFunctionTemplateSpecialization(D);
+ if (!FoundFunctionOrErr)
+ return FoundFunctionOrErr.takeError();
+ if (FunctionDecl *FoundFunction = *FoundFunctionOrErr) {
+ if (D->doesThisDeclarationHaveABody() && FoundFunction->hasBody())
+ return Importer.MapImported(D, FoundFunction);
FoundByLookup = FoundFunction;
}
}
@@ -2570,20 +2992,11 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
else if (!LexicalDC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- // If template was found, look at the templated function.
- if (FromFT) {
- if (auto *Template = dyn_cast<FunctionTemplateDecl>(FoundDecl))
- FoundDecl = Template->getTemplatedDecl();
- else
- continue;
- }
-
if (auto *FoundFunction = dyn_cast<FunctionDecl>(FoundDecl)) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
@@ -2622,13 +3035,14 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
DeclarationNameInfo NameInfo(Name, Loc);
// Import additional name location/type info.
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
QualType FromTy = D->getType();
bool usedDifferentExceptionSpec = false;
@@ -2649,84 +3063,93 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
}
- // Import the type.
- QualType T = Importer.Import(FromTy);
- if (T.isNull())
- return nullptr;
+ QualType T;
+ TypeSourceInfo *TInfo;
+ SourceLocation ToInnerLocStart, ToEndLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ FromTy, D->getTypeSourceInfo(), D->getInnerLocStart(),
+ D->getQualifierLoc(), D->getEndLoc()))
+ std::tie(T, TInfo, ToInnerLocStart, ToQualifierLoc, ToEndLoc) = *Imp;
+ else
+ return Imp.takeError();
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
for (auto P : D->parameters()) {
- auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(P));
- if (!ToP)
- return nullptr;
-
- Parameters.push_back(ToP);
+ if (Expected<ParmVarDecl *> ToPOrErr = import(P))
+ Parameters.push_back(*ToPOrErr);
+ else
+ return ToPOrErr.takeError();
}
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
-
// Create the imported function.
FunctionDecl *ToFunction = nullptr;
- SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, FromConstructor->isExplicit(),
- D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(), D->isImplicit(), D->isConstexpr()))
return ToFunction;
- if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) {
- SmallVector<CXXCtorInitializer *, 4> CtorInitializers;
- for (auto *I : FromConstructor->inits()) {
- auto *ToI = cast_or_null<CXXCtorInitializer>(Importer.Import(I));
- if (!ToI && I)
- return nullptr;
- CtorInitializers.push_back(ToI);
- }
- auto **Memory =
- new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers];
- std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory);
- auto *ToCtor = cast<CXXConstructorDecl>(ToFunction);
- ToCtor->setCtorInitializers(Memory);
- ToCtor->setNumCtorInitializers(NumInitializers);
- }
} else if (isa<CXXDestructorDecl>(D)) {
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
- D->isImplicit()))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ D->isImplicit()))
return ToFunction;
} else if (CXXConversionDecl *FromConversion =
dyn_cast<CXXConversionDecl>(D)) {
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
+ ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
FromConversion->isExplicit(), D->isConstexpr(), SourceLocation()))
return ToFunction;
} else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (GetImportedOrCreateDecl<CXXMethodDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- InnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
+ ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
Method->isInlineSpecified(), D->isConstexpr(), SourceLocation()))
return ToFunction;
} else {
if (GetImportedOrCreateDecl(ToFunction, D, Importer.getToContext(), DC,
- InnerLocStart, NameInfo, T, TInfo,
+ ToInnerLocStart, NameInfo, T, TInfo,
D->getStorageClass(), D->isInlineSpecified(),
D->hasWrittenPrototype(), D->isConstexpr()))
return ToFunction;
}
- // Import the qualifier, if any.
- ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ // Connect the redecl chain.
+ if (FoundByLookup) {
+ auto *Recent = const_cast<FunctionDecl *>(
+ FoundByLookup->getMostRecentDecl());
+ ToFunction->setPreviousDecl(Recent);
+ }
+
+ // Import Ctor initializers.
+ if (auto *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (unsigned NumInitializers = FromConstructor->getNumCtorInitializers()) {
+ SmallVector<CXXCtorInitializer *, 4> CtorInitializers(NumInitializers);
+ // Import first, then allocate memory and copy if there was no error.
+ if (Error Err = ImportContainerChecked(
+ FromConstructor->inits(), CtorInitializers))
+ return std::move(Err);
+ auto **Memory =
+ new (Importer.getToContext()) CXXCtorInitializer *[NumInitializers];
+ std::copy(CtorInitializers.begin(), CtorInitializers.end(), Memory);
+ auto *ToCtor = cast<CXXConstructorDecl>(ToFunction);
+ ToCtor->setCtorInitializers(Memory);
+ ToCtor->setNumCtorInitializers(NumInitializers);
+ }
+ }
+
+ ToFunction->setQualifierInfo(ToQualifierLoc);
ToFunction->setAccess(D->getAccess());
ToFunction->setLexicalDeclContext(LexicalDC);
ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
ToFunction->setTrivial(D->isTrivial());
ToFunction->setPure(D->isPure());
- ToFunction->setRangeEnd(Importer.Import(D->getLocEnd()));
+ ToFunction->setRangeEnd(ToEndLoc);
// Set the parameters.
for (auto *Param : Parameters) {
@@ -2735,12 +3158,6 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
ToFunction->setParams(Parameters);
- if (FoundByLookup) {
- auto *Recent = const_cast<FunctionDecl *>(
- FoundByLookup->getMostRecentDecl());
- ToFunction->setPreviousDecl(Recent);
- }
-
// We need to complete creation of FunctionProtoTypeLoc manually with setting
// params it refers to.
if (TInfo) {
@@ -2753,30 +3170,33 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
if (usedDifferentExceptionSpec) {
// Update FunctionProtoType::ExtProtoInfo.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
- ToFunction->setType(T);
+ if (ExpectedType TyOrErr = import(D->getType()))
+ ToFunction->setType(*TyOrErr);
+ else
+ return TyOrErr.takeError();
}
// Import the describing template function, if any.
- if (FromFT)
- if (!Importer.Import(FromFT))
- return nullptr;
+ if (FromFT) {
+ auto ToFTOrErr = import(FromFT);
+ if (!ToFTOrErr)
+ return ToFTOrErr.takeError();
+ }
if (D->doesThisDeclarationHaveABody()) {
if (Stmt *FromBody = D->getBody()) {
- if (Stmt *ToBody = Importer.Import(FromBody)) {
- ToFunction->setBody(ToBody);
- }
+ if (ExpectedStmt ToBodyOrErr = import(FromBody))
+ ToFunction->setBody(*ToBodyOrErr);
+ else
+ return ToBodyOrErr.takeError();
}
}
// FIXME: Other bits to merge?
// If it is a template, import all related things.
- if (ImportTemplateInformation(D, ToFunction))
- return nullptr;
+ if (Error Err = ImportTemplateInformation(D, ToFunction))
+ return std::move(Err);
bool IsFriend = D->isInIdentifierNamespace(Decl::IDNS_OrdinaryFriend);
@@ -2796,133 +3216,139 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
DC->makeDeclVisibleInContext(ToFunction);
}
- // Import the rest of the chain. I.e. import all subsequent declarations.
- for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt)
- if (!Importer.Import(*RedeclIt))
- return nullptr;
-
if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
ImportOverrides(cast<CXXMethodDecl>(ToFunction), FromCXXMethod);
+ // Import the rest of the chain. I.e. import all subsequent declarations.
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
+ ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
+ if (!ToRedeclOrErr)
+ return ToRedeclOrErr.takeError();
+ }
+
return ToFunction;
}
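// Illustrative sketch, not part of the patch: a function is imported together
// with its whole redeclaration chain, earlier declarations first and later
// ones afterwards, so the imported decls end up linked via setPreviousDecl()
// just as in the source translation unit.
int area(int Width, int Height);               // canonical decl, imported first
int area(int Width, int Height);               // redeclaration
int area(int Width, int Height) { return Width * Height; } // definition, last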
-Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
return VisitFunctionDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
return VisitCXXMethodDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
return VisitCXXMethodDecl(D);
}
-Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
return VisitCXXMethodDecl(D);
}
-static unsigned getFieldIndex(Decl *F) {
- auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
- if (!Owner)
- return 0;
-
- unsigned Index = 1;
- for (const auto *D : Owner->noload_decls()) {
- if (D == F)
- return Index;
-
- if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
- ++Index;
- }
-
- return Index;
-}
-
-Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Determine whether we've already imported this field.
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
- if (auto *FoundField = dyn_cast<FieldDecl>(FoundDecl)) {
+ if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecl)) {
// For anonymous fields, match up by index.
- if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ if (!Name &&
+ ASTImporter::getFieldIndex(D) !=
+ ASTImporter::getFieldIndex(FoundField))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundField->getType())) {
Importer.MapImported(D, FoundField);
+ // In case of a FieldDecl of a ClassTemplateSpecializationDecl, the
+ // initializer of the FieldDecl might not have been instantiated in the
+ // "To" context. However, the "From" context might have instantiated it,
+ // so we have to merge it.
+ if (Expr *FromInitializer = D->getInClassInitializer()) {
+ // We don't have the initializer set yet.
+ if (FoundField->hasInClassInitializer() &&
+ !FoundField->getInClassInitializer()) {
+ if (ExpectedExpr ToInitializerOrErr = import(FromInitializer))
+ FoundField->setInClassInitializer(*ToInitializerOrErr);
+ else {
+ // We can't return an error here, since we already mapped D as imported.
+ // FIXME: warning message?
+ consumeError(ToInitializerOrErr.takeError());
+ return FoundField;
+ }
+ }
+ }
return FoundField;
}
+ // FIXME: Why is this case not handled by calling HandleNameConflict?
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- Expr *BitWidth = Importer.Import(D->getBitWidth());
- if (!BitWidth && D->getBitWidth())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTInfo;
+ Expr *ToBitWidth;
+ SourceLocation ToInnerLocStart;
+ Expr *ToInitializer;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getBitWidth(),
+ D->getInnerLocStart(), D->getInClassInitializer()))
+ std::tie(
+ ToType, ToTInfo, ToBitWidth, ToInnerLocStart, ToInitializer) = *Imp;
+ else
+ return Imp.takeError();
FieldDecl *ToField;
if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo, BitWidth,
- D->isMutable(), D->getInClassInitStyle()))
+ ToInnerLocStart, Loc, Name.getAsIdentifierInfo(),
+ ToType, ToTInfo, ToBitWidth, D->isMutable(),
+ D->getInClassInitStyle()))
return ToField;
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
- if (Expr *FromInitializer = D->getInClassInitializer()) {
- Expr *ToInitializer = Importer.Import(FromInitializer);
- if (ToInitializer)
- ToField->setInClassInitializer(ToInitializer);
- else
- return nullptr;
- }
+ if (ToInitializer)
+ ToField->setInClassInitializer(ToInitializer);
ToField->setImplicit(D->isImplicit());
LexicalDC->addDeclInternal(ToField);
return ToField;
}
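// Illustrative sketch, not part of the patch: why the in-class-initializer
// merge above matters. For a field of a class template specialization, the
// "To" context may know that an initializer exists without having
// instantiated it yet, while the "From" context already has the expression.
template <typename T> struct Box {
  int Size = sizeof(T); // in-class initializer, instantiated lazily
};
Box<double> Forced;     // forces the instantiation in one TU, perhaps not in
                        // the other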
-Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Determine whether we've already imported this field.
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (auto *FoundField = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
// For anonymous indirect fields, match up by index.
- if (!Name && getFieldIndex(D) != getFieldIndex(FoundField))
+ if (!Name &&
+ ASTImporter::getFieldIndex(D) !=
+ ASTImporter::getFieldIndex(FoundField))
continue;
if (Importer.IsStructurallyEquivalent(D->getType(),
@@ -2936,39 +3362,40 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
if (!Name && I < N-1)
continue;
+ // FIXME: Why is this case not handled by calling HandleNameConflict?
Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
<< Name << D->getType() << FoundField->getType();
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ auto TypeOrErr = import(D->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
auto **NamedChain =
new (Importer.getToContext()) NamedDecl*[D->getChainingSize()];
unsigned i = 0;
- for (auto *PI : D->chain()) {
- Decl *D = Importer.Import(PI);
- if (!D)
- return nullptr;
- NamedChain[i++] = cast<NamedDecl>(D);
- }
+ for (auto *PI : D->chain())
+ if (Expected<NamedDecl *> ToD = import(PI))
+ NamedChain[i++] = *ToD;
+ else
+ return ToD.takeError();
llvm::MutableArrayRef<NamedDecl *> CH = {NamedChain, D->getChainingSize()};
IndirectFieldDecl *ToIndirectField;
if (GetImportedOrCreateDecl(ToIndirectField, D, Importer.getToContext(), DC,
- Loc, Name.getAsIdentifierInfo(), T, CH))
+ Loc, Name.getAsIdentifierInfo(), *TypeOrErr, CH))
// FIXME: here we leak `NamedChain` which is allocated above
return ToIndirectField;
- for (const auto *A : D->attrs())
- ToIndirectField->addAttr(Importer.Import(A));
+ for (const auto *Attr : D->attrs())
+ ToIndirectField->addAttr(Importer.Import(Attr));
ToIndirectField->setAccess(D->getAccess());
ToIndirectField->setLexicalDeclContext(LexicalDC);
@@ -2976,16 +3403,14 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
return ToIndirectField;
}
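// Illustrative sketch, not part of the patch: IndirectFieldDecls are the
// implicit members injected for anonymous struct/union members; the "chain"
// imported above records the path through the anonymous object.
struct Packet {
  union {                  // anonymous union
    int Raw;
    unsigned char Bytes[4];
  };                       // Packet::Raw and Packet::Bytes become
                           // IndirectFieldDecls of Packet
};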
-Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Import the major distinguishing characteristics of a declaration.
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- DeclContext *LexicalDC = D->getDeclContext() == D->getLexicalDeclContext()
- ? DC : Importer.ImportContext(D->getLexicalDeclContext());
- if (!DC || !LexicalDC)
- return nullptr;
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Determine whether we've already imported this decl.
- // FriendDecl is not a NamedDecl so we cannot use localUncachedLookup.
+ // FriendDecl is not a NamedDecl so we cannot use lookup.
auto *RD = cast<CXXRecordDecl>(DC);
FriendDecl *ImportedFriend = RD->getFirstFriend();
@@ -3007,30 +3432,42 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Not found. Create it.
FriendDecl::FriendUnion ToFU;
if (NamedDecl *FriendD = D->getFriendDecl()) {
- auto *ToFriendD = cast_or_null<NamedDecl>(Importer.Import(FriendD));
- if (ToFriendD && FriendD->getFriendObjectKind() != Decl::FOK_None &&
+ NamedDecl *ToFriendD;
+ if (Error Err = importInto(ToFriendD, FriendD))
+ return std::move(Err);
+
+ if (FriendD->getFriendObjectKind() != Decl::FOK_None &&
!(FriendD->isInIdentifierNamespace(Decl::IDNS_NonMemberOperator)))
ToFriendD->setObjectOfFriendDecl(false);
ToFU = ToFriendD;
- } else // The friend is a type, not a decl.
- ToFU = Importer.Import(D->getFriendType());
- if (!ToFU)
- return nullptr;
+ } else { // The friend is a type, not a decl.
+ if (auto TSIOrErr = import(D->getFriendType()))
+ ToFU = *TSIOrErr;
+ else
+ return TSIOrErr.takeError();
+ }
SmallVector<TemplateParameterList *, 1> ToTPLists(D->NumTPLists);
auto **FromTPLists = D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0; I < D->NumTPLists; I++) {
- TemplateParameterList *List = ImportTemplateParameterList(FromTPLists[I]);
- if (!List)
- return nullptr;
- ToTPLists[I] = List;
+ if (auto ListOrErr = ImportTemplateParameterList(FromTPLists[I]))
+ ToTPLists[I] = *ListOrErr;
+ else
+ return ListOrErr.takeError();
}
+ auto LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+ auto FriendLocOrErr = import(D->getFriendLoc());
+ if (!FriendLocOrErr)
+ return FriendLocOrErr.takeError();
+
FriendDecl *FrD;
if (GetImportedOrCreateDecl(FrD, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocation()), ToFU,
- Importer.Import(D->getFriendLoc()), ToTPLists))
+ *LocationOrErr, ToFU,
+ *FriendLocOrErr, ToTPLists))
return FrD;
FrD->setAccess(D->getAccess());
@@ -3039,22 +3476,21 @@ Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
return FrD;
}
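// Illustrative sketch, not part of the patch: the two sides of the
// FriendDecl::FriendUnion imported above, roughly.
class Pool {
  friend class Registry;     // the friend is a type (TypeSourceInfo branch)
  friend void drain(Pool &); // the friend is a NamedDecl (FunctionDecl branch)
};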
-Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Import the major distinguishing characteristics of an ivar.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Determine whether we've already imported this ivar
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
- if (auto *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecl)) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecl)) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundIvar->getType())) {
Importer.MapImported(D, FoundIvar);
@@ -3065,26 +3501,27 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
<< Name << D->getType() << FoundIvar->getType();
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- Expr *BitWidth = Importer.Import(D->getBitWidth());
- if (!BitWidth && D->getBitWidth())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ Expr *ToBitWidth;
+ SourceLocation ToInnerLocStart;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getBitWidth(), D->getInnerLocStart()))
+ std::tie(ToType, ToTypeSourceInfo, ToBitWidth, ToInnerLocStart) = *Imp;
+ else
+ return Imp.takeError();
ObjCIvarDecl *ToIvar;
if (GetImportedOrCreateDecl(
ToIvar, D, Importer.getToContext(), cast<ObjCContainerDecl>(DC),
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo, D->getAccessControl(), BitWidth,
- D->getSynthesize()))
+ ToInnerLocStart, Loc, Name.getAsIdentifierInfo(),
+ ToType, ToTypeSourceInfo,
+ D->getAccessControl(), ToBitWidth, D->getSynthesize()))
return ToIvar;
ToIvar->setLexicalDeclContext(LexicalDC);
@@ -3092,25 +3529,36 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
return ToIvar;
}
-Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+
+ SmallVector<Decl*, 2> Redecls = getCanonicalForwardRedeclChain(D);
+ auto RedeclIt = Redecls.begin();
+ // Import the first part of the decl chain. I.e. import all previous
+ // declarations starting from the canonical decl.
+ for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
+ }
+ assert(*RedeclIt == D);
+
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Try to find a variable in our own ("to") context with the same name and
// in the same context as the variable we're importing.
+ VarDecl *FoundByLookup = nullptr;
if (D->isFileVarDecl()) {
- VarDecl *MergeWithVar = nullptr;
SmallVector<NamedDecl *, 4> ConflictingDecls;
unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
@@ -3121,7 +3569,23 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
D->hasExternalFormalLinkage()) {
if (Importer.IsStructurallyEquivalent(D->getType(),
FoundVar->getType())) {
- MergeWithVar = FoundVar;
+
+ // The VarDecl in the "From" context has a definition, but in the
+ // "To" context we already have a definition.
+ VarDecl *FoundDef = FoundVar->getDefinition();
+ if (D->isThisDeclarationADefinition() && FoundDef)
+ // FIXME Check for ODR error if the two definitions have
+ // different initializers?
+ return Importer.MapImported(D, FoundDef);
+
+ // The VarDecl in the "From" context has an initializer, but in the
+ // "To" context we already have an initializer.
+ const VarDecl *FoundDInit = nullptr;
+ if (D->getInit() && FoundVar->getAnyInitializer(FoundDInit))
+ // FIXME Diagnose ODR error if the two initializers are different?
+ return Importer.MapImported(D, const_cast<VarDecl*>(FoundDInit));
+
+ FoundByLookup = FoundVar;
break;
}
@@ -3133,16 +3597,16 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
if (isa<IncompleteArrayType>(FoundArray) &&
isa<ConstantArrayType>(TArray)) {
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ if (auto TyOrErr = import(D->getType()))
+ FoundVar->setType(*TyOrErr);
+ else
+ return TyOrErr.takeError();
- FoundVar->setType(T);
- MergeWithVar = FoundVar;
+ FoundByLookup = FoundVar;
break;
} else if (isa<IncompleteArrayType>(TArray) &&
isa<ConstantArrayType>(FoundArray)) {
- MergeWithVar = FoundVar;
+ FoundByLookup = FoundVar;
break;
}
}
@@ -3157,125 +3621,110 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
ConflictingDecls.push_back(FoundDecl);
}
- if (MergeWithVar) {
- // An equivalent variable with external linkage has been found. Link
- // the two declarations, then merge them.
- Importer.MapImported(D, MergeWithVar);
- updateFlags(D, MergeWithVar);
-
- if (VarDecl *DDef = D->getDefinition()) {
- if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
- Importer.ToDiag(ExistingDef->getLocation(),
- diag::err_odr_variable_multiple_def)
- << Name;
- Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
- } else {
- Expr *Init = Importer.Import(DDef->getInit());
- MergeWithVar->setInit(Init);
- if (DDef->isInitKnownICE()) {
- EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
- Eval->CheckedICE = true;
- Eval->IsICE = DDef->isInitICE();
- }
- }
- }
-
- return MergeWithVar;
- }
-
if (!ConflictingDecls.empty()) {
Name = Importer.HandleNameConflict(Name, DC, IDNS,
ConflictingDecls.data(),
ConflictingDecls.size());
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
- // Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToInnerLocStart;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getInnerLocStart(),
+ D->getQualifierLoc()))
+ std::tie(ToType, ToTypeSourceInfo, ToInnerLocStart, ToQualifierLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the imported variable.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
VarDecl *ToVar;
if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo,
+ ToInnerLocStart, Loc,
+ Name.getAsIdentifierInfo(),
+ ToType, ToTypeSourceInfo,
D->getStorageClass()))
return ToVar;
- ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToVar->setQualifierInfo(ToQualifierLoc);
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
- // Templated declarations should never appear in the enclosing DeclContext.
- if (!D->getDescribedVarTemplate())
- LexicalDC->addDeclInternal(ToVar);
+ if (FoundByLookup) {
+ auto *Recent = const_cast<VarDecl *>(FoundByLookup->getMostRecentDecl());
+ ToVar->setPreviousDecl(Recent);
+ }
- // Merge the initializer.
- if (ImportDefinition(D, ToVar))
- return nullptr;
+ if (Error Err = ImportInitializer(D, ToVar))
+ return std::move(Err);
if (D->isConstexpr())
ToVar->setConstexpr(true);
+ if (D->getDeclContext()->containsDeclAndLoad(D))
+ DC->addDeclInternal(ToVar);
+ if (DC != LexicalDC && D->getLexicalDeclContext()->containsDeclAndLoad(D))
+ LexicalDC->addDeclInternal(ToVar);
+
+ // Import the rest of the chain. I.e. import all subsequent declarations.
+ for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
+ ExpectedDecl RedeclOrErr = import(*RedeclIt);
+ if (!RedeclOrErr)
+ return RedeclOrErr.takeError();
+ }
+
return ToVar;
}
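// Illustrative sketch, not part of the patch: the array-type merging above
// covers the usual pattern where one translation unit declares a variable
// with an incomplete array type and another one completes it.
extern int Table[]; // incomplete array type in a non-defining declaration
int Table[16];      // constant array type in the defining declaration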
-Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the parameter's type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation;
+ QualType ToType;
+ if (auto Imp = importSeq(D->getDeclName(), D->getLocation(), D->getType()))
+ std::tie(ToDeclName, ToLocation, ToType) = *Imp;
+ else
+ return Imp.takeError();
// Create the imported parameter.
ImplicitParamDecl *ToParm = nullptr;
- if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(), T,
- D->getParameterKind()))
+ if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
+ ToLocation, ToDeclName.getAsIdentifierInfo(),
+ ToType, D->getParameterKind()))
return ToParm;
return ToParm;
}
-Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the parameter's type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation, ToInnerLocStart;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getDeclName(), D->getLocation(), D->getType(), D->getInnerLocStart(),
+ D->getTypeSourceInfo()))
+ std::tie(
+ ToDeclName, ToLocation, ToType, ToInnerLocStart,
+ ToTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
- // Create the imported parameter.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
ParmVarDecl *ToParm;
if (GetImportedOrCreateDecl(ToParm, D, Importer.getToContext(), DC,
- Importer.Import(D->getInnerLocStart()), Loc,
- Name.getAsIdentifierInfo(), T, TInfo,
- D->getStorageClass(),
+ ToInnerLocStart, ToLocation,
+ ToDeclName.getAsIdentifierInfo(), ToType,
+ ToTypeSourceInfo, D->getStorageClass(),
/*DefaultArg*/ nullptr))
return ToParm;
@@ -3283,21 +3732,19 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
ToParm->setKNRPromoted(D->isKNRPromoted());
- Expr *ToDefArg = nullptr;
- Expr *FromDefArg = nullptr;
if (D->hasUninstantiatedDefaultArg()) {
- FromDefArg = D->getUninstantiatedDefaultArg();
- ToDefArg = Importer.Import(FromDefArg);
- ToParm->setUninstantiatedDefaultArg(ToDefArg);
+ if (auto ToDefArgOrErr = import(D->getUninstantiatedDefaultArg()))
+ ToParm->setUninstantiatedDefaultArg(*ToDefArgOrErr);
+ else
+ return ToDefArgOrErr.takeError();
} else if (D->hasUnparsedDefaultArg()) {
ToParm->setUnparsedDefaultArg();
} else if (D->hasDefaultArg()) {
- FromDefArg = D->getDefaultArg();
- ToDefArg = Importer.Import(FromDefArg);
- ToParm->setDefaultArg(ToDefArg);
+ if (auto ToDefArgOrErr = import(D->getDefaultArg()))
+ ToParm->setDefaultArg(*ToDefArgOrErr);
+ else
+ return ToDefArgOrErr.takeError();
}
- if (FromDefArg && !ToDefArg)
- return nullptr;
if (D->isObjCMethodParameter()) {
ToParm->setObjCMethodScopeInfo(D->getFunctionScopeIndex());
@@ -3310,19 +3757,18 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
return ToParm;
}
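// Illustrative sketch, not part of the patch: the default-argument states
// handled above, roughly, at the source level.
template <typename T> void makeOne(T Value = T()); // in an instantiation the
                                                    // default argument stays
                                                    // "uninstantiated" until
                                                    // it is actually needed
struct Widget {
  void resize(int NewSize = 42); // ordinary parsed default argument
};
// An "unparsed" default argument exists only transiently while the parser is
// still delaying the default arguments of members of an enclosing class.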
-Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the major distinguishing characteristics of a method.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (auto *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecl)) {
if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
@@ -3337,7 +3783,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Check the number of parameters.
@@ -3348,7 +3795,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Check parameter types.
@@ -3363,7 +3811,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
<< (*P)->getType() << (*FoundP)->getType();
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
<< (*FoundP)->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
@@ -3375,7 +3824,8 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag(FoundMethod->getLocation(),
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// FIXME: Any other bits we need to merge?
@@ -3383,18 +3833,20 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
}
- // Import the result type.
- QualType ResultTy = Importer.Import(D->getReturnType());
- if (ResultTy.isNull())
- return nullptr;
-
- TypeSourceInfo *ReturnTInfo = Importer.Import(D->getReturnTypeSourceInfo());
+ SourceLocation ToEndLoc;
+ QualType ToReturnType;
+ TypeSourceInfo *ToReturnTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getEndLoc(), D->getReturnType(), D->getReturnTypeSourceInfo()))
+ std::tie(ToEndLoc, ToReturnType, ToReturnTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
ObjCMethodDecl *ToMethod;
if (GetImportedOrCreateDecl(
ToMethod, D, Importer.getToContext(), Loc,
- Importer.Import(D->getLocEnd()), Name.getObjCSelector(), ResultTy,
- ReturnTInfo, DC, D->isInstanceMethod(), D->isVariadic(),
+ ToEndLoc, Name.getObjCSelector(), ToReturnType,
+ ToReturnTypeSourceInfo, DC, D->isInstanceMethod(), D->isVariadic(),
D->isPropertyAccessor(), D->isImplicit(), D->isDefined(),
D->getImplementationControl(), D->hasRelatedResultType()))
return ToMethod;
@@ -3405,11 +3857,10 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
// Import the parameters
SmallVector<ParmVarDecl *, 5> ToParams;
for (auto *FromP : D->parameters()) {
- auto *ToP = cast_or_null<ParmVarDecl>(Importer.Import(FromP));
- if (!ToP)
- return nullptr;
-
- ToParams.push_back(ToP);
+ if (Expected<ParmVarDecl *> ToPOrErr = import(FromP))
+ ToParams.push_back(*ToPOrErr);
+ else
+ return ToPOrErr.takeError();
}
// Set the parameters.
@@ -3418,82 +3869,99 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
ToMethod->addDeclInternal(ToParam);
}
- SmallVector<SourceLocation, 12> SelLocs;
- D->getSelectorLocs(SelLocs);
- for (auto &Loc : SelLocs)
- Loc = Importer.Import(Loc);
+ SmallVector<SourceLocation, 12> FromSelLocs;
+ D->getSelectorLocs(FromSelLocs);
+ SmallVector<SourceLocation, 12> ToSelLocs(FromSelLocs.size());
+ if (Error Err = ImportContainerChecked(FromSelLocs, ToSelLocs))
+ return std::move(Err);
- ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, ToSelLocs);
ToMethod->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToMethod);
return ToMethod;
}
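// A minimal sketch, not part of this patch: mismatches that used to collapse
// into a silent nullptr are now reported as ImportError (see the
// make_error<ImportError>(ImportError::NameConflict) calls above). Assuming
// ImportError follows the usual llvm::ErrorInfo pattern, a caller can pick it
// apart with the standard handlers; the helper below is hypothetical.
void reportImportFailure(llvm::Error Err) {
  llvm::handleAllErrors(
      std::move(Err),
      [](const ImportError &IE) {
        // e.g. a name conflict when an existing method declaration disagrees.
        llvm::errs() << "AST import error: " << IE.message() << "\n";
      },
      [](const llvm::ErrorInfoBase &EIB) {
        llvm::errs() << "unexpected import failure: " << EIB.message() << "\n";
      });
}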
-Decl *ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
  // Import the major distinguishing characteristics of a type parameter.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- TypeSourceInfo *BoundInfo = Importer.Import(D->getTypeSourceInfo());
- if (!BoundInfo)
- return nullptr;
+ SourceLocation ToVarianceLoc, ToLocation, ToColonLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getVarianceLoc(), D->getLocation(), D->getColonLoc(),
+ D->getTypeSourceInfo()))
+ std::tie(ToVarianceLoc, ToLocation, ToColonLoc, ToTypeSourceInfo) = *Imp;
+ else
+ return Imp.takeError();
ObjCTypeParamDecl *Result;
if (GetImportedOrCreateDecl(
Result, D, Importer.getToContext(), DC, D->getVariance(),
- Importer.Import(D->getVarianceLoc()), D->getIndex(),
- Importer.Import(D->getLocation()), Name.getAsIdentifierInfo(),
- Importer.Import(D->getColonLoc()), BoundInfo))
+ ToVarianceLoc, D->getIndex(),
+ ToLocation, Name.getAsIdentifierInfo(),
+ ToColonLoc, ToTypeSourceInfo))
return Result;
Result->setLexicalDeclContext(LexicalDC);
return Result;
}
-Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
// Import the major distinguishing characteristics of a category.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- auto *ToInterface =
- cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
- if (!ToInterface)
- return nullptr;
+ ObjCInterfaceDecl *ToInterface;
+ if (Error Err = importInto(ToInterface, D->getClassInterface()))
+ return std::move(Err);
// Determine if we've already encountered this category.
ObjCCategoryDecl *MergeWithCategory
= ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
ObjCCategoryDecl *ToCategory = MergeWithCategory;
if (!ToCategory) {
+ SourceLocation ToAtStartLoc, ToCategoryNameLoc;
+ SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
+ if (auto Imp = importSeq(
+ D->getAtStartLoc(), D->getCategoryNameLoc(),
+ D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
+ std::tie(
+ ToAtStartLoc, ToCategoryNameLoc,
+ ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
+ else
+ return Imp.takeError();
if (GetImportedOrCreateDecl(ToCategory, D, Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()), Loc,
- Importer.Import(D->getCategoryNameLoc()),
+ ToAtStartLoc, Loc,
+ ToCategoryNameLoc,
Name.getAsIdentifierInfo(), ToInterface,
/*TypeParamList=*/nullptr,
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc())))
+ ToIvarLBraceLoc,
+ ToIvarRBraceLoc))
return ToCategory;
ToCategory->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToCategory);
- // Import the type parameter list after calling Imported, to avoid
+ // Import the type parameter list after MapImported, to avoid
// loops when bringing in their DeclContext.
- ToCategory->setTypeParamList(ImportObjCTypeParamList(
- D->getTypeParamList()));
+ if (auto PListOrErr = ImportObjCTypeParamList(D->getTypeParamList()))
+ ToCategory->setTypeParamList(*PListOrErr);
+ else
+ return PListOrErr.takeError();
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
@@ -3504,45 +3972,48 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
FromProtoEnd = D->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto =
- cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return nullptr;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
}
// FIXME: If we're merging, make sure that the protocol list is the same.
ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
+
} else {
Importer.MapImported(D, ToCategory);
}
// Import all of the members of this category.
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
// If we have an implementation, import it as well.
if (D->getImplementation()) {
- auto *Impl =
- cast_or_null<ObjCCategoryImplDecl>(
- Importer.Import(D->getImplementation()));
- if (!Impl)
- return nullptr;
-
- ToCategory->setImplementation(Impl);
+ if (Expected<ObjCCategoryImplDecl *> ToImplOrErr =
+ import(D->getImplementation()))
+ ToCategory->setImplementation(*ToImplOrErr);
+ else
+ return ToImplOrErr.takeError();
}
return ToCategory;
}
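// A minimal sketch, not part of this patch, of the ordering rule used above
// ("import the type parameter list after MapImported"): registering the
// From->To mapping before importing sub-objects lets a recursive import that
// cycles back to the same node resolve through the map instead of re-entering
// the visitor. Everything below is illustrative; Node stands in for a Decl.
struct Node { std::vector<Node *> Children; };

llvm::DenseMap<Node *, Node *> ImportMap;      // plays the role of MapImported

Node *importNode(Node *From) {
  if (Node *Already = ImportMap.lookup(From))
    return Already;                            // a cycle terminates here
  Node *To = new Node();
  ImportMap[From] = To;                        // register *before* the children
  for (Node *Child : From->Children)
    To->Children.push_back(importNode(Child)); // may loop back to From safely
  return To;
}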
-bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
- ObjCProtocolDecl *To,
- ImportDefinitionKind Kind) {
+Error ASTNodeImporter::ImportDefinition(
+ ObjCProtocolDecl *From, ObjCProtocolDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition()) {
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From);
- return false;
+ if (Error Err = ImportDeclContext(From))
+ return Err;
+ return Error::success();
}
// Start the protocol definition
@@ -3551,17 +4022,22 @@ bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCProtocolDecl::protocol_loc_iterator
- FromProtoLoc = From->protocol_loc_begin();
+ ObjCProtocolDecl::protocol_loc_iterator FromProtoLoc =
+ From->protocol_loc_begin();
for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return true;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
+
}
// FIXME: If we're merging, make sure that the protocol list is the same.
@@ -3570,22 +4046,22 @@ bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this protocol.
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
}
- return false;
+ return Error::success();
}
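// A minimal sketch, not part of this patch, of the mechanical bool -> Error
// conversion applied to helpers like ImportDefinition above: `return true`
// (failure) becomes returning a real llvm::Error, `return false` becomes
// Error::success(), and callers test with `if (Error Err = ...)`. The helper
// names below are hypothetical.
llvm::Error doImportStep(bool Fail) {
  if (Fail)   // was: return true;
    return llvm::make_error<llvm::StringError>("step failed",
                                               llvm::inconvertibleErrorCode());
  return llvm::Error::success();   // was: return false;
}

llvm::Error runImportSteps() {
  if (llvm::Error Err = doImportStep(/*Fail=*/false))
    return Err;                    // propagate instead of returning true
  return llvm::Error::success();
}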
-Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
// If this protocol has a definition in the translation unit we're coming
// from, but this particular declaration is not that definition, import the
// definition and map to that.
ObjCProtocolDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of a protocol.
@@ -3593,14 +4069,13 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
ObjCProtocolDecl *MergeWithProtocol = nullptr;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
continue;
@@ -3611,9 +4086,13 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
ObjCProtocolDecl *ToProto = MergeWithProtocol;
if (!ToProto) {
+ auto ToAtBeginLocOrErr = import(D->getAtStartLoc());
+ if (!ToAtBeginLocOrErr)
+ return ToAtBeginLocOrErr.takeError();
+
if (GetImportedOrCreateDecl(ToProto, D, Importer.getToContext(), DC,
Name.getAsIdentifierInfo(), Loc,
- Importer.Import(D->getAtStartLoc()),
+ *ToAtBeginLocOrErr,
/*PrevDecl=*/nullptr))
return ToProto;
ToProto->setLexicalDeclContext(LexicalDC);
@@ -3622,29 +4101,39 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
Importer.MapImported(D, ToProto);
- if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
- return nullptr;
+ if (D->isThisDeclarationADefinition())
+ if (Error Err = ImportDefinition(D, ToProto))
+ return std::move(Err);
return ToProto;
}
-Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- DeclContext *LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ExpectedDecl ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
+
+ ExpectedSLoc ExternLocOrErr = import(D->getExternLoc());
+ if (!ExternLocOrErr)
+ return ExternLocOrErr.takeError();
- SourceLocation ExternLoc = Importer.Import(D->getExternLoc());
- SourceLocation LangLoc = Importer.Import(D->getLocation());
+ ExpectedSLoc LangLocOrErr = import(D->getLocation());
+ if (!LangLocOrErr)
+ return LangLocOrErr.takeError();
bool HasBraces = D->hasBraces();
LinkageSpecDecl *ToLinkageSpec;
if (GetImportedOrCreateDecl(ToLinkageSpec, D, Importer.getToContext(), DC,
- ExternLoc, LangLoc, D->getLanguage(), HasBraces))
+ *ExternLocOrErr, *LangLocOrErr,
+ D->getLanguage(), HasBraces))
return ToLinkageSpec;
if (HasBraces) {
- SourceLocation RBraceLoc = Importer.Import(D->getRBraceLoc());
- ToLinkageSpec->setRBraceLoc(RBraceLoc);
+ ExpectedSLoc RBraceLocOrErr = import(D->getRBraceLoc());
+ if (!RBraceLocOrErr)
+ return RBraceLocOrErr.takeError();
+ ToLinkageSpec->setRBraceLoc(*RBraceLocOrErr);
}
ToLinkageSpec->setLexicalDeclContext(LexicalDC);
@@ -3653,24 +4142,31 @@ Decl *ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
return ToLinkageSpec;
}
-Decl *ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclarationNameInfo NameInfo(Name,
- Importer.Import(D->getNameInfo().getLoc()));
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ SourceLocation ToLoc, ToUsingLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc()))
+ std::tie(ToLoc, ToUsingLoc, ToQualifierLoc) = *Imp;
+ else
+ return Imp.takeError();
+
+ DeclarationNameInfo NameInfo(Name, ToLoc);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
UsingDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()), NameInfo,
+ ToUsingLoc, ToQualifierLoc, NameInfo,
D->hasTypename()))
return ToUsing;
@@ -3679,48 +4175,45 @@ Decl *ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
if (NamedDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingDecl(D)) {
- if (auto *ToPattern =
- dyn_cast_or_null<NamedDecl>(Importer.Import(FromPattern)))
- Importer.getToContext().setInstantiatedFromUsingDecl(ToUsing, ToPattern);
+ if (Expected<NamedDecl *> ToPatternOrErr = import(FromPattern))
+ Importer.getToContext().setInstantiatedFromUsingDecl(
+ ToUsing, *ToPatternOrErr);
else
- return nullptr;
+ return ToPatternOrErr.takeError();
}
- for (auto *FromShadow : D->shadows()) {
- if (auto *ToShadow =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromShadow)))
- ToUsing->addShadowDecl(ToShadow);
+ for (UsingShadowDecl *FromShadow : D->shadows()) {
+ if (Expected<UsingShadowDecl *> ToShadowOrErr = import(FromShadow))
+ ToUsing->addShadowDecl(*ToShadowOrErr);
else
- // FIXME: We return a nullptr here but the definition is already created
+      // FIXME: We return an error here, but the definition is already created
// and available with lookups. How to fix this?..
- return nullptr;
+ return ToShadowOrErr.takeError();
}
return ToUsing;
}
-Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- auto *ToUsing = dyn_cast_or_null<UsingDecl>(
- Importer.Import(D->getUsingDecl()));
- if (!ToUsing)
- return nullptr;
+ Expected<UsingDecl *> ToUsingOrErr = import(D->getUsingDecl());
+ if (!ToUsingOrErr)
+ return ToUsingOrErr.takeError();
- auto *ToTarget = dyn_cast_or_null<NamedDecl>(
- Importer.Import(D->getTargetDecl()));
- if (!ToTarget)
- return nullptr;
+ Expected<NamedDecl *> ToTargetOrErr = import(D->getTargetDecl());
+ if (!ToTargetOrErr)
+ return ToTargetOrErr.takeError();
UsingShadowDecl *ToShadow;
if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
- ToUsing, ToTarget))
+ *ToUsingOrErr, *ToTargetOrErr))
return ToShadow;
ToShadow->setLexicalDeclContext(LexicalDC);
@@ -3728,14 +4221,13 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
if (UsingShadowDecl *FromPattern =
Importer.getFromContext().getInstantiatedFromUsingShadowDecl(D)) {
- if (auto *ToPattern =
- dyn_cast_or_null<UsingShadowDecl>(Importer.Import(FromPattern)))
- Importer.getToContext().setInstantiatedFromUsingShadowDecl(ToShadow,
- ToPattern);
+ if (Expected<UsingShadowDecl *> ToPatternOrErr = import(FromPattern))
+ Importer.getToContext().setInstantiatedFromUsingShadowDecl(
+ ToShadow, *ToPatternOrErr);
else
- // FIXME: We return a nullptr here but the definition is already created
+      // FIXME: We return an error here, but the definition is already created
// and available with lookups. How to fix this?..
- return nullptr;
+ return ToPatternOrErr.takeError();
}
LexicalDC->addDeclInternal(ToShadow);
@@ -3743,32 +4235,40 @@ Decl *ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
return ToShadow;
}
-Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclContext *ToComAncestor = Importer.ImportContext(D->getCommonAncestor());
- if (!ToComAncestor)
- return nullptr;
-
- auto *ToNominated = cast_or_null<NamespaceDecl>(
- Importer.Import(D->getNominatedNamespace()));
- if (!ToNominated)
- return nullptr;
+ auto ToComAncestorOrErr = Importer.ImportContext(D->getCommonAncestor());
+ if (!ToComAncestorOrErr)
+ return ToComAncestorOrErr.takeError();
+
+ NamespaceDecl *ToNominatedNamespace;
+ SourceLocation ToUsingLoc, ToNamespaceKeyLocation, ToIdentLocation;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNominatedNamespace(), D->getUsingLoc(),
+ D->getNamespaceKeyLocation(), D->getQualifierLoc(),
+ D->getIdentLocation()))
+ std::tie(
+ ToNominatedNamespace, ToUsingLoc, ToNamespaceKeyLocation,
+ ToQualifierLoc, ToIdentLocation) = *Imp;
+ else
+ return Imp.takeError();
UsingDirectiveDecl *ToUsingDir;
if (GetImportedOrCreateDecl(ToUsingDir, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getNamespaceKeyLocation()),
- Importer.Import(D->getQualifierLoc()),
- Importer.Import(D->getIdentLocation()),
- ToNominated, ToComAncestor))
+ ToUsingLoc,
+ ToNamespaceKeyLocation,
+ ToQualifierLoc,
+ ToIdentLocation,
+ ToNominatedNamespace, *ToComAncestorOrErr))
return ToUsingDir;
ToUsingDir->setLexicalDeclContext(LexicalDC);
@@ -3777,25 +4277,34 @@ Decl *ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
return ToUsingDir;
}
-Decl *ASTNodeImporter::VisitUnresolvedUsingValueDecl(
+ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
UnresolvedUsingValueDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
- DeclarationNameInfo NameInfo(Name, Importer.Import(D->getNameInfo().getLoc()));
- ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+ SourceLocation ToLoc, ToUsingLoc, ToEllipsisLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getNameInfo().getLoc(), D->getUsingLoc(), D->getQualifierLoc(),
+ D->getEllipsisLoc()))
+ std::tie(ToLoc, ToUsingLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
+ else
+ return Imp.takeError();
+
+ DeclarationNameInfo NameInfo(Name, ToLoc);
+ if (Error Err = ImportDeclarationNameLoc(D->getNameInfo(), NameInfo))
+ return std::move(Err);
UnresolvedUsingValueDecl *ToUsingValue;
if (GetImportedOrCreateDecl(ToUsingValue, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getQualifierLoc()), NameInfo,
- Importer.Import(D->getEllipsisLoc())))
+ ToUsingLoc, ToQualifierLoc, NameInfo,
+ ToEllipsisLoc))
return ToUsingValue;
ToUsingValue->setAccess(D->getAccess());
@@ -3805,23 +4314,30 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingValueDecl(
return ToUsingValue;
}
-Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
+ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD = nullptr;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
+ SourceLocation ToUsingLoc, ToTypenameLoc, ToEllipsisLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ if (auto Imp = importSeq(
+ D->getUsingLoc(), D->getTypenameLoc(), D->getQualifierLoc(),
+ D->getEllipsisLoc()))
+ std::tie(ToUsingLoc, ToTypenameLoc, ToQualifierLoc, ToEllipsisLoc) = *Imp;
+ else
+ return Imp.takeError();
+
UnresolvedUsingTypenameDecl *ToUsing;
if (GetImportedOrCreateDecl(ToUsing, D, Importer.getToContext(), DC,
- Importer.Import(D->getUsingLoc()),
- Importer.Import(D->getTypenameLoc()),
- Importer.Import(D->getQualifierLoc()), Loc, Name,
- Importer.Import(D->getEllipsisLoc())))
+ ToUsingLoc, ToTypenameLoc,
+ ToQualifierLoc, Loc, Name, ToEllipsisLoc))
return ToUsing;
ToUsing->setAccess(D->getAccess());
@@ -3831,16 +4347,17 @@ Decl *ASTNodeImporter::VisitUnresolvedUsingTypenameDecl(
return ToUsing;
}
-bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
- ObjCInterfaceDecl *To,
- ImportDefinitionKind Kind) {
+
+Error ASTNodeImporter::ImportDefinition(
+ ObjCInterfaceDecl *From, ObjCInterfaceDecl *To, ImportDefinitionKind Kind) {
if (To->getDefinition()) {
// Check consistency of superclass.
ObjCInterfaceDecl *FromSuper = From->getSuperClass();
if (FromSuper) {
- FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
- if (!FromSuper)
- return true;
+ if (auto FromSuperOrErr = import(FromSuper))
+ FromSuper = *FromSuperOrErr;
+ else
+ return FromSuperOrErr.takeError();
}
ObjCInterfaceDecl *ToSuper = To->getSuperClass();
@@ -3865,8 +4382,9 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
}
if (shouldForceImportDeclContext(Kind))
- ImportDeclContext(From);
- return false;
+ if (Error Err = ImportDeclContext(From))
+ return Err;
+ return Error::success();
}
// Start the definition.
@@ -3874,28 +4392,32 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
// If this class has a superclass, import it.
if (From->getSuperClass()) {
- TypeSourceInfo *SuperTInfo = Importer.Import(From->getSuperClassTInfo());
- if (!SuperTInfo)
- return true;
-
- To->setSuperClass(SuperTInfo);
+ if (auto SuperTInfoOrErr = import(From->getSuperClassTInfo()))
+ To->setSuperClass(*SuperTInfoOrErr);
+ else
+ return SuperTInfoOrErr.takeError();
}
// Import protocols
SmallVector<ObjCProtocolDecl *, 4> Protocols;
SmallVector<SourceLocation, 4> ProtocolLocs;
- ObjCInterfaceDecl::protocol_loc_iterator
- FromProtoLoc = From->protocol_loc_begin();
+ ObjCInterfaceDecl::protocol_loc_iterator FromProtoLoc =
+ From->protocol_loc_begin();
for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
FromProtoEnd = From->protocol_end();
FromProto != FromProtoEnd;
++FromProto, ++FromProtoLoc) {
- auto *ToProto = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
- if (!ToProto)
- return true;
- Protocols.push_back(ToProto);
- ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ if (Expected<ObjCProtocolDecl *> ToProtoOrErr = import(*FromProto))
+ Protocols.push_back(*ToProtoOrErr);
+ else
+ return ToProtoOrErr.takeError();
+
+ if (ExpectedSLoc ToProtoLocOrErr = import(*FromProtoLoc))
+ ProtocolLocs.push_back(*ToProtoLocOrErr);
+ else
+ return ToProtoLocOrErr.takeError();
+
}
// FIXME: If we're merging, make sure that the protocol list is the same.
@@ -3904,58 +4426,66 @@ bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
// Import categories. When the categories themselves are imported, they'll
// hook themselves into this interface.
- for (auto *Cat : From->known_categories())
- Importer.Import(Cat);
+ for (auto *Cat : From->known_categories()) {
+ auto ToCatOrErr = import(Cat);
+ if (!ToCatOrErr)
+ return ToCatOrErr.takeError();
+ }
// If we have an @implementation, import it as well.
if (From->getImplementation()) {
- auto *Impl = cast_or_null<ObjCImplementationDecl>(
- Importer.Import(From->getImplementation()));
- if (!Impl)
- return true;
-
- To->setImplementation(Impl);
+ if (Expected<ObjCImplementationDecl *> ToImplOrErr =
+ import(From->getImplementation()))
+ To->setImplementation(*ToImplOrErr);
+ else
+ return ToImplOrErr.takeError();
}
if (shouldForceImportDeclContext(Kind)) {
// Import all of the members of this class.
- ImportDeclContext(From, /*ForceImport=*/true);
+ if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
+ return Err;
}
- return false;
+ return Error::success();
}
-ObjCTypeParamList *
+Expected<ObjCTypeParamList *>
ASTNodeImporter::ImportObjCTypeParamList(ObjCTypeParamList *list) {
if (!list)
return nullptr;
SmallVector<ObjCTypeParamDecl *, 4> toTypeParams;
- for (auto fromTypeParam : *list) {
- auto *toTypeParam = cast_or_null<ObjCTypeParamDecl>(
- Importer.Import(fromTypeParam));
- if (!toTypeParam)
- return nullptr;
-
- toTypeParams.push_back(toTypeParam);
+ for (auto *fromTypeParam : *list) {
+ if (auto toTypeParamOrErr = import(fromTypeParam))
+ toTypeParams.push_back(*toTypeParamOrErr);
+ else
+ return toTypeParamOrErr.takeError();
}
+ auto LAngleLocOrErr = import(list->getLAngleLoc());
+ if (!LAngleLocOrErr)
+ return LAngleLocOrErr.takeError();
+
+ auto RAngleLocOrErr = import(list->getRAngleLoc());
+ if (!RAngleLocOrErr)
+ return RAngleLocOrErr.takeError();
+
return ObjCTypeParamList::create(Importer.getToContext(),
- Importer.Import(list->getLAngleLoc()),
+ *LAngleLocOrErr,
toTypeParams,
- Importer.Import(list->getRAngleLoc()));
+ *RAngleLocOrErr);
}
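// A minimal sketch, not part of this patch: an Expected<T *> can carry a null
// pointer as a *success* value, which is exactly how the `return nullptr`
// above reports "no type parameter list" without raising an error.
void demonstrateNullSuccess() {
  llvm::Expected<clang::ObjCTypeParamList *> NoList = nullptr; // success, value is null
  if (NoList && *NoList == nullptr) {
    // The "absent list" case: a successful import that yields no list,
    // as opposed to a failed import carrying an Error.
  }
}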
-Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// If this class has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
ObjCInterfaceDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of an @interface.
@@ -3963,15 +4493,14 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Look for an existing interface with the same name.
ObjCInterfaceDecl *MergeWithIface = nullptr;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
@@ -3983,9 +4512,13 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// Create an interface declaration, if one does not already exist.
ObjCInterfaceDecl *ToIface = MergeWithIface;
if (!ToIface) {
+ ExpectedSLoc AtBeginLocOrErr = import(D->getAtStartLoc());
+ if (!AtBeginLocOrErr)
+ return AtBeginLocOrErr.takeError();
+
if (GetImportedOrCreateDecl(
ToIface, D, Importer.getToContext(), DC,
- Importer.Import(D->getAtStartLoc()), Name.getAsIdentifierInfo(),
+ *AtBeginLocOrErr, Name.getAsIdentifierInfo(),
/*TypeParamList=*/nullptr,
/*PrevDecl=*/nullptr, Loc, D->isImplicitInterfaceDecl()))
return ToIface;
@@ -3993,91 +4526,99 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
LexicalDC->addDeclInternal(ToIface);
}
Importer.MapImported(D, ToIface);
- // Import the type parameter list after calling Imported, to avoid
+ // Import the type parameter list after MapImported, to avoid
// loops when bringing in their DeclContext.
- ToIface->setTypeParamList(ImportObjCTypeParamList(
- D->getTypeParamListAsWritten()));
+ if (auto ToPListOrErr =
+ ImportObjCTypeParamList(D->getTypeParamListAsWritten()))
+ ToIface->setTypeParamList(*ToPListOrErr);
+ else
+ return ToPListOrErr.takeError();
- if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
- return nullptr;
+ if (D->isThisDeclarationADefinition())
+ if (Error Err = ImportDefinition(D, ToIface))
+ return std::move(Err);
return ToIface;
}
-Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
- auto *Category = cast_or_null<ObjCCategoryDecl>(
- Importer.Import(D->getCategoryDecl()));
- if (!Category)
- return nullptr;
+ExpectedDecl
+ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category;
+ if (Error Err = importInto(Category, D->getCategoryDecl()))
+ return std::move(Err);
ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
if (!ToImpl) {
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
+
+ SourceLocation ToLocation, ToAtStartLoc, ToCategoryNameLoc;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAtStartLoc(), D->getCategoryNameLoc()))
+ std::tie(ToLocation, ToAtStartLoc, ToCategoryNameLoc) = *Imp;
+ else
+ return Imp.takeError();
- SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
if (GetImportedOrCreateDecl(
ToImpl, D, Importer.getToContext(), DC,
Importer.Import(D->getIdentifier()), Category->getClassInterface(),
- Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()), CategoryNameLoc))
+ ToLocation, ToAtStartLoc, ToCategoryNameLoc))
return ToImpl;
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
-
- ToImpl->setLexicalDeclContext(LexicalDC);
- }
-
+ ToImpl->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToImpl);
Category->setImplementation(ToImpl);
}
Importer.MapImported(D, ToImpl);
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
+
return ToImpl;
}
-Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
// Find the corresponding interface.
- auto *Iface = cast_or_null<ObjCInterfaceDecl>(
- Importer.Import(D->getClassInterface()));
- if (!Iface)
- return nullptr;
+ ObjCInterfaceDecl *Iface;
+ if (Error Err = importInto(Iface, D->getClassInterface()))
+ return std::move(Err);
// Import the superclass, if any.
- ObjCInterfaceDecl *Super = nullptr;
- if (D->getSuperClass()) {
- Super = cast_or_null<ObjCInterfaceDecl>(
- Importer.Import(D->getSuperClass()));
- if (!Super)
- return nullptr;
- }
+ ObjCInterfaceDecl *Super;
+ if (Error Err = importInto(Super, D->getSuperClass()))
+ return std::move(Err);
ObjCImplementationDecl *Impl = Iface->getImplementation();
if (!Impl) {
// We haven't imported an implementation yet. Create a new @implementation
// now.
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
+
+ SourceLocation ToLocation, ToAtStartLoc, ToSuperClassLoc;
+ SourceLocation ToIvarLBraceLoc, ToIvarRBraceLoc;
+ if (auto Imp = importSeq(
+ D->getLocation(), D->getAtStartLoc(), D->getSuperClassLoc(),
+ D->getIvarLBraceLoc(), D->getIvarRBraceLoc()))
+ std::tie(
+ ToLocation, ToAtStartLoc, ToSuperClassLoc,
+ ToIvarLBraceLoc, ToIvarRBraceLoc) = *Imp;
+ else
+ return Imp.takeError();
+
if (GetImportedOrCreateDecl(Impl, D, Importer.getToContext(),
- Importer.ImportContext(D->getDeclContext()),
- Iface, Super, Importer.Import(D->getLocation()),
- Importer.Import(D->getAtStartLoc()),
- Importer.Import(D->getSuperClassLoc()),
- Importer.Import(D->getIvarLBraceLoc()),
- Importer.Import(D->getIvarRBraceLoc())))
+ DC, Iface, Super,
+ ToLocation,
+ ToAtStartLoc,
+ ToSuperClassLoc,
+ ToIvarLBraceLoc,
+ ToIvarRBraceLoc))
return Impl;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- DeclContext *LexicalDC
- = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- Impl->setLexicalDeclContext(LexicalDC);
- }
+ Impl->setLexicalDeclContext(LexicalDC);
// Associate the implementation with the class it implements.
Iface->setImplementation(Impl);
@@ -4110,30 +4651,31 @@ Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
else
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_missing_superclass);
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
}
// Import all of the members of this @implementation.
- ImportDeclContext(D);
+ if (Error Err = ImportDeclContext(D))
+ return std::move(Err);
return Impl;
}
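// A minimal sketch, not part of this patch, of what the importInto(To, From)
// convenience used above presumably reduces to (an assumption, shown under a
// hypothetical name): assign the imported value through the out-reference and
// forward any Error to the caller.
template <typename T>
llvm::Error assignOrPropagate(T &To, llvm::Expected<T> ValOrErr) {
  if (ValOrErr)
    To = *ValOrErr;
  return ValOrErr.takeError();   // Error::success() when nothing went wrong
}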
-Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
// Import the major distinguishing characteristics of an @property.
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
// Check whether we have already imported this property.
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (auto *FoundProp = dyn_cast<ObjCPropertyDecl>(FoundDecl)) {
// Check property types.
@@ -4143,7 +4685,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
<< Name << D->getType() << FoundProp->getType();
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// FIXME: Check property attributes, getters, setters, etc.?
@@ -4154,79 +4697,88 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
}
- // Import the type.
- TypeSourceInfo *TSI = Importer.Import(D->getTypeSourceInfo());
- if (!TSI)
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToAtLoc, ToLParenLoc;
+ if (auto Imp = importSeq(
+ D->getType(), D->getTypeSourceInfo(), D->getAtLoc(), D->getLParenLoc()))
+ std::tie(ToType, ToTypeSourceInfo, ToAtLoc, ToLParenLoc) = *Imp;
+ else
+ return Imp.takeError();
// Create the new property.
ObjCPropertyDecl *ToProperty;
if (GetImportedOrCreateDecl(
ToProperty, D, Importer.getToContext(), DC, Loc,
- Name.getAsIdentifierInfo(), Importer.Import(D->getAtLoc()),
- Importer.Import(D->getLParenLoc()), Importer.Import(D->getType()),
- TSI, D->getPropertyImplementation()))
+ Name.getAsIdentifierInfo(), ToAtLoc,
+ ToLParenLoc, ToType,
+ ToTypeSourceInfo, D->getPropertyImplementation()))
return ToProperty;
+ Selector ToGetterName, ToSetterName;
+ SourceLocation ToGetterNameLoc, ToSetterNameLoc;
+ ObjCMethodDecl *ToGetterMethodDecl, *ToSetterMethodDecl;
+ ObjCIvarDecl *ToPropertyIvarDecl;
+ if (auto Imp = importSeq(
+ D->getGetterName(), D->getSetterName(),
+ D->getGetterNameLoc(), D->getSetterNameLoc(),
+ D->getGetterMethodDecl(), D->getSetterMethodDecl(),
+ D->getPropertyIvarDecl()))
+ std::tie(
+ ToGetterName, ToSetterName,
+ ToGetterNameLoc, ToSetterNameLoc,
+ ToGetterMethodDecl, ToSetterMethodDecl,
+ ToPropertyIvarDecl) = *Imp;
+ else
+ return Imp.takeError();
+
ToProperty->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
ToProperty->setPropertyAttributesAsWritten(
D->getPropertyAttributesAsWritten());
- ToProperty->setGetterName(Importer.Import(D->getGetterName()),
- Importer.Import(D->getGetterNameLoc()));
- ToProperty->setSetterName(Importer.Import(D->getSetterName()),
- Importer.Import(D->getSetterNameLoc()));
- ToProperty->setGetterMethodDecl(
- cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
- ToProperty->setSetterMethodDecl(
- cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
- ToProperty->setPropertyIvarDecl(
- cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ ToProperty->setGetterName(ToGetterName, ToGetterNameLoc);
+ ToProperty->setSetterName(ToSetterName, ToSetterNameLoc);
+ ToProperty->setGetterMethodDecl(ToGetterMethodDecl);
+ ToProperty->setSetterMethodDecl(ToSetterMethodDecl);
+ ToProperty->setPropertyIvarDecl(ToPropertyIvarDecl);
return ToProperty;
}
-Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
- auto *Property = cast_or_null<ObjCPropertyDecl>(
- Importer.Import(D->getPropertyDecl()));
- if (!Property)
- return nullptr;
-
- DeclContext *DC = Importer.ImportContext(D->getDeclContext());
- if (!DC)
- return nullptr;
+ExpectedDecl
+ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property;
+ if (Error Err = importInto(Property, D->getPropertyDecl()))
+ return std::move(Err);
- // Import the lexical declaration context.
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
- auto *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
- if (!InImpl)
- return nullptr;
+ auto *InImpl = cast<ObjCImplDecl>(LexicalDC);
// Import the ivar (for an @synthesize).
ObjCIvarDecl *Ivar = nullptr;
- if (D->getPropertyIvarDecl()) {
- Ivar = cast_or_null<ObjCIvarDecl>(
- Importer.Import(D->getPropertyIvarDecl()));
- if (!Ivar)
- return nullptr;
- }
+ if (Error Err = importInto(Ivar, D->getPropertyIvarDecl()))
+ return std::move(Err);
ObjCPropertyImplDecl *ToImpl
= InImpl->FindPropertyImplDecl(Property->getIdentifier(),
Property->getQueryKind());
if (!ToImpl) {
+ SourceLocation ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc;
+ if (auto Imp = importSeq(
+ D->getBeginLoc(), D->getLocation(), D->getPropertyIvarDeclLoc()))
+ std::tie(ToBeginLoc, ToLocation, ToPropertyIvarDeclLoc) = *Imp;
+ else
+ return Imp.takeError();
+
if (GetImportedOrCreateDecl(ToImpl, D, Importer.getToContext(), DC,
- Importer.Import(D->getLocStart()),
- Importer.Import(D->getLocation()), Property,
+ ToBeginLoc,
+ ToLocation, Property,
D->getPropertyImplementation(), Ivar,
- Importer.Import(D->getPropertyIvarDeclLoc())))
+ ToPropertyIvarDeclLoc))
return ToImpl;
ToImpl->setLexicalDeclContext(LexicalDC);
@@ -4244,7 +4796,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
diag::note_odr_objc_property_impl_kind)
<< D->getPropertyDecl()->getDeclName()
<< (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// For @synthesize, check that we have the same
@@ -4258,7 +4811,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
Importer.FromDiag(D->getPropertyIvarDeclLoc(),
diag::note_odr_objc_synthesize_ivar_here)
<< D->getPropertyIvarDecl()->getDeclName();
- return nullptr;
+
+ return make_error<ImportError>(ImportError::NameConflict);
}
// Merge the existing implementation with the new implementation.
@@ -4268,41 +4822,46 @@ Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
return ToImpl;
}
-Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
// For template arguments, we adopt the translation unit as our declaration
// context. This context will be fixed when the actual template declaration
// is created.
// FIXME: Import default argument.
+
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ ExpectedSLoc LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
+
TemplateTypeParmDecl *ToD = nullptr;
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getLocStart()), Importer.Import(D->getLocation()),
+ *BeginLocOrErr, *LocationOrErr,
D->getDepth(), D->getIndex(), Importer.Import(D->getIdentifier()),
D->wasDeclaredWithTypename(), D->isParameterPack());
return ToD;
}
-Decl *
+ExpectedDecl
ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- // Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
-
- // Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
-
- // Import the type of this declaration.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
-
- // Import type-source information.
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
+ DeclarationName ToDeclName;
+ SourceLocation ToLocation, ToInnerLocStart;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ if (auto Imp = importSeq(
+ D->getDeclName(), D->getLocation(), D->getType(), D->getTypeSourceInfo(),
+ D->getInnerLocStart()))
+ std::tie(
+ ToDeclName, ToLocation, ToType, ToTypeSourceInfo,
+ ToInnerLocStart) = *Imp;
+ else
+ return Imp.takeError();
// FIXME: Import default argument.
@@ -4310,36 +4869,39 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
Importer.getToContext().getTranslationUnitDecl(),
- Importer.Import(D->getInnerLocStart()), Loc, D->getDepth(),
- D->getPosition(), Name.getAsIdentifierInfo(), T, D->isParameterPack(),
- TInfo);
+ ToInnerLocStart, ToLocation, D->getDepth(),
+ D->getPosition(), ToDeclName.getAsIdentifierInfo(), ToType,
+ D->isParameterPack(), ToTypeSourceInfo);
return ToD;
}
-Decl *
+ExpectedDecl
ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
// Import the name of this declaration.
- DeclarationName Name = Importer.Import(D->getDeclName());
- if (D->getDeclName() && !Name)
- return nullptr;
+ auto NameOrErr = import(D->getDeclName());
+ if (!NameOrErr)
+ return NameOrErr.takeError();
// Import the location of this declaration.
- SourceLocation Loc = Importer.Import(D->getLocation());
+ ExpectedSLoc LocationOrErr = import(D->getLocation());
+ if (!LocationOrErr)
+ return LocationOrErr.takeError();
// Import template parameters.
- TemplateParameterList *TemplateParams
- = ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
// FIXME: Import default argument.
TemplateTemplateParmDecl *ToD = nullptr;
(void)GetImportedOrCreateDecl(
ToD, D, Importer.getToContext(),
- Importer.getToContext().getTranslationUnitDecl(), Loc, D->getDepth(),
- D->getPosition(), D->isParameterPack(), Name.getAsIdentifierInfo(),
- TemplateParams);
+ Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr,
+ D->getDepth(), D->getPosition(), D->isParameterPack(),
+ (*NameOrErr).getAsIdentifierInfo(),
+ *TemplateParamsOrErr);
return ToD;
}
@@ -4354,19 +4916,18 @@ static ClassTemplateDecl *getDefinition(ClassTemplateDecl *D) {
return TemplateWithDef;
}
-Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
- // If this record has a definition in the translation unit we're coming from,
- // but this particular declaration is not that definition, import the
- // definition and map to that.
- auto *Definition =
- cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
- if (Definition && Definition != D->getTemplatedDecl()) {
- Decl *ImportedDef
- = Importer.Import(Definition->getDescribedClassTemplate());
- if (!ImportedDef)
- return nullptr;
+ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ bool IsFriend = D->getFriendObjectKind() != Decl::FOK_None;
- return Importer.MapImported(D, ImportedDef);
+ // If this template has a definition in the translation unit we're coming
+ // from, but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ClassTemplateDecl *Definition = getDefinition(D);
+ if (Definition && Definition != D && !IsFriend) {
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of this class template.
@@ -4374,40 +4935,33 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
+ ClassTemplateDecl *FoundByLookup = nullptr;
+
// We may already have a template of the same name; try to find and match it.
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
- if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary |
+ Decl::IDNS_TagFriend))
continue;
Decl *Found = FoundDecl;
- if (auto *FoundTemplate = dyn_cast<ClassTemplateDecl>(Found)) {
-
- // The class to be imported is a definition.
- if (D->isThisDeclarationADefinition()) {
- // Lookup will find the fwd decl only if that is more recent than the
- // definition. So, try to get the definition if that is available in
- // the redecl chain.
- ClassTemplateDecl *TemplateWithDef = getDefinition(FoundTemplate);
- if (!TemplateWithDef)
- continue;
- FoundTemplate = TemplateWithDef; // Continue with the definition.
- }
+ auto *FoundTemplate = dyn_cast<ClassTemplateDecl>(Found);
+ if (FoundTemplate) {
if (IsStructuralMatch(D, FoundTemplate)) {
- // The class templates structurally match; call it the same template.
-
- Importer.MapImported(D->getTemplatedDecl(),
- FoundTemplate->getTemplatedDecl());
- return Importer.MapImported(D, FoundTemplate);
+ ClassTemplateDecl *TemplateWithDef = getDefinition(FoundTemplate);
+ if (D->isThisDeclarationADefinition() && TemplateWithDef) {
+ return Importer.MapImported(D, TemplateWithDef);
+ }
+ FoundByLookup = FoundTemplate;
+ break;
}
}
@@ -4421,33 +4975,61 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
if (!Name)
- return nullptr;
+ return make_error<ImportError>(ImportError::NameConflict);
}
CXXRecordDecl *FromTemplated = D->getTemplatedDecl();
// Create the declaration that is being templated.
- auto *ToTemplated = cast_or_null<CXXRecordDecl>(
- Importer.Import(FromTemplated));
- if (!ToTemplated)
- return nullptr;
+ CXXRecordDecl *ToTemplated;
+ if (Error Err = importInto(ToTemplated, FromTemplated))
+ return std::move(Err);
// Create the class template declaration itself.
- TemplateParameterList *TemplateParams =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
ClassTemplateDecl *D2;
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, Loc, Name,
- TemplateParams, ToTemplated))
+ *TemplateParamsOrErr, ToTemplated))
return D2;
ToTemplated->setDescribedClassTemplate(D2);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+
+ if (D->getDeclContext()->containsDeclAndLoad(D))
+ DC->addDeclInternal(D2);
+ if (DC != LexicalDC && D->getLexicalDeclContext()->containsDeclAndLoad(D))
+ LexicalDC->addDeclInternal(D2);
+
+ if (FoundByLookup) {
+ auto *Recent =
+ const_cast<ClassTemplateDecl *>(FoundByLookup->getMostRecentDecl());
+
+ // It is possible that during the import of the class template definition
+ // we start the import of a fwd friend decl of the very same class template
+ // and we add the fwd friend decl to the lookup table. But the ToTemplated
+ // had been created earlier and by that time the lookup could not find
+ // anything existing, so it has no previous decl. Later, (still during the
+ // import of the fwd friend decl) we start to import the definition again
+ // and this time the lookup finds the previous fwd friend class template.
+ // In this case we must set up the previous decl for the templated decl.
+ if (!ToTemplated->getPreviousDecl()) {
+ CXXRecordDecl *PrevTemplated =
+ FoundByLookup->getTemplatedDecl()->getMostRecentDecl();
+ if (ToTemplated != PrevTemplated)
+ ToTemplated->setPreviousDecl(PrevTemplated);
+ }
+
+ D2->setPreviousDecl(Recent);
+ }
+
+ if (LexicalDC != DC && IsFriend)
+ DC->makeDeclVisibleInContext(D2);
if (FromTemplated->isCompleteDefinition() &&
!ToTemplated->isCompleteDefinition()) {
@@ -4457,124 +5039,173 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
return D2;
}
-Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
TagDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
- auto *ClassTemplate =
- cast_or_null<ClassTemplateDecl>(Importer.Import(
- D->getSpecializedTemplate()));
- if (!ClassTemplate)
- return nullptr;
+ ClassTemplateDecl *ClassTemplate;
+ if (Error Err = importInto(ClassTemplate, D->getSpecializedTemplate()))
+ return std::move(Err);
// Import the context of this declaration.
- DeclContext *DC = ClassTemplate->getDeclContext();
- if (!DC)
- return nullptr;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
-
- // Import the location of this declaration.
- SourceLocation StartLoc = Importer.Import(D->getLocStart());
- SourceLocation IdLoc = Importer.Import(D->getLocation());
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (ImportTemplateArguments(D->getTemplateArgs().data(),
- D->getTemplateArgs().size(),
- TemplateArgs))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ return std::move(Err);
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
- ClassTemplateSpecializationDecl *D2
- = ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
- if (D2) {
- // We already have a class template specialization with these template
- // arguments.
+ ClassTemplateSpecializationDecl *D2 = nullptr;
+ ClassTemplatePartialSpecializationDecl *PartialSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(D);
+ if (PartialSpec)
+ D2 = ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos);
+ else
+ D2 = ClassTemplate->findSpecialization(TemplateArgs, InsertPos);
+ ClassTemplateSpecializationDecl * const PrevDecl = D2;
+ RecordDecl *FoundDef = D2 ? D2->getDefinition() : nullptr;
+ if (FoundDef) {
+ if (!D->isCompleteDefinition()) {
+ // The "From" translation unit only had a forward declaration; call it
+ // the same declaration.
+ // TODO Handle the redecl chain properly!
+ return Importer.MapImported(D, FoundDef);
+ }
- // FIXME: Check for specialization vs. instantiation errors.
+ if (IsStructuralMatch(D, FoundDef)) {
- if (RecordDecl *FoundDef = D2->getDefinition()) {
- if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
- // The record types structurally match, or the "from" translation
- // unit only had a forward declaration anyway; call it the same
- // function.
- return Importer.MapImported(D, FoundDef);
+ Importer.MapImported(D, FoundDef);
+
+      // Import those default field initializers which have been
+ // instantiated in the "From" context, but not in the "To" context.
+ for (auto *FromField : D->fields()) {
+ auto ToOrErr = import(FromField);
+ if (!ToOrErr)
+ // FIXME: return the error?
+ consumeError(ToOrErr.takeError());
+ }
+
+ // Import those methods which have been instantiated in the
+ // "From" context, but not in the "To" context.
+ for (CXXMethodDecl *FromM : D->methods()) {
+ auto ToOrErr = import(FromM);
+ if (!ToOrErr)
+ // FIXME: return the error?
+ consumeError(ToOrErr.takeError());
}
+
+ // TODO Import instantiated default arguments.
+ // TODO Import instantiated exception specifications.
+ //
+ // Generally, the ASTCommon.h/DeclUpdateKind enum gives a very good hint
+ // about what else could be fused during an AST merge.
+
+ return FoundDef;
}
- } else {
- // Create a new specialization.
- if (auto *PartialSpec =
- dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
- // Import TemplateArgumentListInfo
+ } else { // We either couldn't find any previous specialization in the "To"
+ // context, or we found one without a definition. Let's create a
+ // new specialization and register it with the class template.
+
+ // Import the location of this declaration.
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+ ExpectedSLoc IdLocOrErr = import(D->getLocation());
+ if (!IdLocOrErr)
+ return IdLocOrErr.takeError();
+
+ if (PartialSpec) {
+ // Import TemplateArgumentListInfo.
TemplateArgumentListInfo ToTAInfo;
const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
- if (ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo))
+ return std::move(Err);
- QualType CanonInjType = Importer.Import(
- PartialSpec->getInjectedSpecializationType());
- if (CanonInjType.isNull())
- return nullptr;
+ QualType CanonInjType;
+ if (Error Err = importInto(
+ CanonInjType, PartialSpec->getInjectedSpecializationType()))
+ return std::move(Err);
CanonInjType = CanonInjType.getCanonicalType();
- TemplateParameterList *ToTPList = ImportTemplateParameterList(
- PartialSpec->getTemplateParameters());
- if (!ToTPList && PartialSpec->getTemplateParameters())
- return nullptr;
+ auto ToTPListOrErr = ImportTemplateParameterList(
+ PartialSpec->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
- D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
- IdLoc, ToTPList, ClassTemplate,
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, ClassTemplate,
llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
- ToTAInfo, CanonInjType, nullptr))
+ ToTAInfo, CanonInjType,
+ cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
return D2;
- } else {
+ // Update InsertPos, because preceding import calls may have invalidated
+ // it by adding new specializations.
+ if (!ClassTemplate->findPartialSpecialization(TemplateArgs, InsertPos))
+ // Add this partial specialization to the class template.
+ ClassTemplate->AddPartialSpecialization(
+ cast<ClassTemplatePartialSpecializationDecl>(D2), InsertPos);
+
+ } else { // Not a partial specialization.
if (GetImportedOrCreateDecl(
- D2, D, Importer.getToContext(), D->getTagKind(), DC, StartLoc,
- IdLoc, ClassTemplate, TemplateArgs, /*PrevDecl=*/nullptr))
+ D2, D, Importer.getToContext(), D->getTagKind(), DC,
+ *BeginLocOrErr, *IdLocOrErr, ClassTemplate, TemplateArgs,
+ PrevDecl))
return D2;
+
+ // Update InsertPos, because preceding import calls may have invalidated
+ // it by adding new specializations.
+ if (!ClassTemplate->findSpecialization(TemplateArgs, InsertPos))
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
}
D2->setSpecializationKind(D->getSpecializationKind());
- // Add this specialization to the class template.
- ClassTemplate->AddSpecialization(D2, InsertPos);
-
// Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
if (auto *TSI = D->getTypeAsWritten()) {
- TypeSourceInfo *TInfo = Importer.Import(TSI);
- if (!TInfo)
- return nullptr;
- D2->setTypeAsWritten(TInfo);
- D2->setTemplateKeywordLoc(Importer.Import(D->getTemplateKeywordLoc()));
- D2->setExternLoc(Importer.Import(D->getExternLoc()));
+ if (auto TInfoOrErr = import(TSI))
+ D2->setTypeAsWritten(*TInfoOrErr);
+ else
+ return TInfoOrErr.takeError();
+
+ if (auto LocOrErr = import(D->getTemplateKeywordLoc()))
+ D2->setTemplateKeywordLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
+
+ if (auto LocOrErr = import(D->getExternLoc()))
+ D2->setExternLoc(*LocOrErr);
+ else
+ return LocOrErr.takeError();
}
- SourceLocation POI = Importer.Import(D->getPointOfInstantiation());
- if (POI.isValid())
- D2->setPointOfInstantiation(POI);
- else if (D->getPointOfInstantiation().isValid())
- return nullptr;
+ if (D->getPointOfInstantiation().isValid()) {
+ if (auto POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
@@ -4586,13 +5217,14 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
LexicalDC->addDeclInternal(D2);
}
}
- if (D->isCompleteDefinition() && ImportDefinition(D, D2))
- return nullptr;
+ if (D->isCompleteDefinition())
+ if (Error Err = ImportDefinition(D, D2))
+ return std::move(Err);
return D2;
}
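// --- Illustrative sketch (editor's addition, not part of this patch) ---
// The Visit* methods above all follow the same llvm::Expected/llvm::Error
// propagation idiom that replaces the old nullptr checks. The helpers below
// (importNodeSketch, importIntoSketch, callerSketch) are hypothetical
// stand-ins; only llvm::Expected, llvm::Error and llvm::StringError from
// LLVM's Support library are assumed.
#include "llvm/Support/Error.h"

static llvm::Expected<int> importNodeSketch(bool Fail) {
  if (Fail)
    return llvm::make_error<llvm::StringError>("import failed",
                                               llvm::inconvertibleErrorCode());
  return 42;
}

// Mirrors importInto(): assign the imported value on success, otherwise hand
// the llvm::Error straight back to the caller.
static llvm::Error importIntoSketch(int &To, bool Fail) {
  auto ValOrErr = importNodeSketch(Fail);
  if (!ValOrErr)
    return ValOrErr.takeError();
  To = *ValOrErr;
  return llvm::Error::success();
}

static llvm::Expected<int> callerSketch() {
  int Value;
  if (llvm::Error Err = importIntoSketch(Value, /*Fail=*/false))
    return std::move(Err); // propagate, exactly as the Visit* methods do
  return Value;
}
// --- end of sketch ---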
-Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
+ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
// If this variable has a definition in the translation unit we're coming
// from,
// but this particular declaration is not that definition, import the
@@ -4600,11 +5232,11 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
auto *Definition =
cast_or_null<VarDecl>(D->getTemplatedDecl()->getDefinition());
if (Definition && Definition != D->getTemplatedDecl()) {
- Decl *ImportedDef = Importer.Import(Definition->getDescribedVarTemplate());
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(
+ Definition->getDescribedVarTemplate()))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
// Import the major distinguishing characteristics of this variable template.
@@ -4612,8 +5244,8 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4621,14 +5253,13 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
assert(!DC->isFunctionOrMethod() &&
"Variable templates cannot be declared at function scope");
SmallVector<NamedDecl *, 4> ConflictingDecls;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
continue;
Decl *Found = FoundDecl;
- if (auto *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
+ if (VarTemplateDecl *FoundTemplate = dyn_cast<VarTemplateDecl>(Found)) {
if (IsStructuralMatch(D, FoundTemplate)) {
// The variable templates structurally match; call it the same template.
Importer.MapImported(D->getTemplatedDecl(),
@@ -4647,29 +5278,32 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
}
if (!Name)
- return nullptr;
+ // FIXME: Is it possible to get an error other than a name conflict?
+ // (Put this `if` into the previous `if`?)
+ return make_error<ImportError>(ImportError::NameConflict);
VarDecl *DTemplated = D->getTemplatedDecl();
// Import the type.
- QualType T = Importer.Import(DTemplated->getType());
- if (T.isNull())
- return nullptr;
+ // FIXME: Value not used?
+ ExpectedType TypeOrErr = import(DTemplated->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
// Create the declaration that is being templated.
- auto *ToTemplated = dyn_cast_or_null<VarDecl>(Importer.Import(DTemplated));
- if (!ToTemplated)
- return nullptr;
+ VarDecl *ToTemplated;
+ if (Error Err = importInto(ToTemplated, DTemplated))
+ return std::move(Err);
// Create the variable template declaration itself.
- TemplateParameterList *TemplateParams =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!TemplateParams)
- return nullptr;
+ auto TemplateParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!TemplateParamsOrErr)
+ return TemplateParamsOrErr.takeError();
VarTemplateDecl *ToVarTD;
if (GetImportedOrCreateDecl(ToVarTD, D, Importer.getToContext(), DC, Loc,
- Name, TemplateParams, ToTemplated))
+ Name, *TemplateParamsOrErr, ToTemplated))
return ToVarTD;
ToTemplated->setDescribedVarTemplate(ToVarTD);
@@ -4686,46 +5320,42 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
return ToVarTD;
}
-Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
+ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
// If this record has a definition in the translation unit we're coming from,
// but this particular declaration is not that definition, import the
// definition and map to that.
VarDecl *Definition = D->getDefinition();
if (Definition && Definition != D) {
- Decl *ImportedDef = Importer.Import(Definition);
- if (!ImportedDef)
- return nullptr;
-
- return Importer.MapImported(D, ImportedDef);
+ if (ExpectedDecl ImportedDefOrErr = import(Definition))
+ return Importer.MapImported(D, *ImportedDefOrErr);
+ else
+ return ImportedDefOrErr.takeError();
}
- auto *VarTemplate = cast_or_null<VarTemplateDecl>(
- Importer.Import(D->getSpecializedTemplate()));
- if (!VarTemplate)
- return nullptr;
+ VarTemplateDecl *VarTemplate;
+ if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate()))
+ return std::move(Err);
// Import the context of this declaration.
- DeclContext *DC = VarTemplate->getDeclContext();
- if (!DC)
- return nullptr;
-
- DeclContext *LexicalDC = DC;
- if (D->getDeclContext() != D->getLexicalDeclContext()) {
- LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
- if (!LexicalDC)
- return nullptr;
- }
+ DeclContext *DC, *LexicalDC;
+ if (Error Err = ImportDeclContext(D, DC, LexicalDC))
+ return std::move(Err);
// Import the location of this declaration.
- SourceLocation StartLoc = Importer.Import(D->getLocStart());
- SourceLocation IdLoc = Importer.Import(D->getLocation());
+ ExpectedSLoc BeginLocOrErr = import(D->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ auto IdLocOrErr = import(D->getLocation());
+ if (!IdLocOrErr)
+ return IdLocOrErr.takeError();
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (ImportTemplateArguments(D->getTemplateArgs().data(),
- D->getTemplateArgs().size(), TemplateArgs))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ return std::move(Err);
// Try to find an existing specialization with these template arguments.
void *InsertPos = nullptr;
@@ -4748,17 +5378,18 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
}
} else {
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
- return nullptr;
+ QualType T;
+ if (Error Err = importInto(T, D->getType()))
+ return std::move(Err);
- TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- if (D->getTypeSourceInfo() && !TInfo)
- return nullptr;
+ auto TInfoOrErr = import(D->getTypeSourceInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
TemplateArgumentListInfo ToTAInfo;
- if (ImportTemplateArgumentListInfo(D->getTemplateArgsInfo(), ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ D->getTemplateArgsInfo(), ToTAInfo))
+ return std::move(Err);
using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
// Create a new specialization.
@@ -4767,41 +5398,47 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
TemplateArgumentListInfo ArgInfos;
const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten();
// NOTE: FromTAArgsAsWritten and template parameter list are non-null.
- if (ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ArgInfos))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ *FromTAArgsAsWritten, ArgInfos))
+ return std::move(Err);
- TemplateParameterList *ToTPList = ImportTemplateParameterList(
- FromPartial->getTemplateParameters());
- if (!ToTPList)
- return nullptr;
+ auto ToTPListOrErr = ImportTemplateParameterList(
+ FromPartial->getTemplateParameters());
+ if (!ToTPListOrErr)
+ return ToTPListOrErr.takeError();
PartVarSpecDecl *ToPartial;
if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
- StartLoc, IdLoc, ToTPList, VarTemplate, T,
- TInfo, D->getStorageClass(), TemplateArgs,
- ArgInfos))
+ *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr,
+ VarTemplate, T, *TInfoOrErr,
+ D->getStorageClass(), TemplateArgs, ArgInfos))
return ToPartial;
- auto *FromInst = FromPartial->getInstantiatedFromMember();
- auto *ToInst = cast_or_null<PartVarSpecDecl>(Importer.Import(FromInst));
- if (FromInst && !ToInst)
- return nullptr;
+ if (Expected<PartVarSpecDecl *> ToInstOrErr = import(
+ FromPartial->getInstantiatedFromMember()))
+ ToPartial->setInstantiatedFromMember(*ToInstOrErr);
+ else
+ return ToInstOrErr.takeError();
- ToPartial->setInstantiatedFromMember(ToInst);
if (FromPartial->isMemberSpecialization())
ToPartial->setMemberSpecialization();
D2 = ToPartial;
+
} else { // Full specialization
- if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, StartLoc,
- IdLoc, VarTemplate, T, TInfo,
+ if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
+ *BeginLocOrErr, *IdLocOrErr, VarTemplate,
+ T, *TInfoOrErr,
D->getStorageClass(), TemplateArgs))
return D2;
}
- SourceLocation POI = D->getPointOfInstantiation();
- if (POI.isValid())
- D2->setPointOfInstantiation(Importer.Import(POI));
+ if (D->getPointOfInstantiation().isValid()) {
+ if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation()))
+ D2->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
D2->setSpecializationKind(D->getSpecializationKind());
D2->setTemplateArgsInfo(ToTAInfo);
@@ -4810,7 +5447,10 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
VarTemplate->AddSpecialization(D2, InsertPos);
// Import the qualifier, if any.
- D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ if (auto LocOrErr = import(D->getQualifierLoc()))
+ D2->setQualifierInfo(*LocOrErr);
+ else
+ return LocOrErr.takeError();
if (D->isConstexpr())
D2->setConstexpr(true);
@@ -4822,25 +5462,21 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
D2->setAccess(D->getAccess());
}
- // NOTE: isThisDeclarationADefinition() can return DeclarationOnly even if
- // declaration has initializer. Should this be fixed in the AST?.. Anyway,
- // we have to check the declaration for initializer - otherwise, it won't be
- // imported.
- if ((D->isThisDeclarationADefinition() || D->hasInit()) &&
- ImportDefinition(D, D2))
- return nullptr;
+ if (Error Err = ImportInitializer(D, D2))
+ return std::move(Err);
return D2;
}
-Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ExpectedDecl
+ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
SourceLocation Loc;
NamedDecl *ToD;
- if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
- return nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
if (ToD)
return ToD;
@@ -4849,13 +5485,13 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// type, and in the same context as the function we're importing.
if (!LexicalDC->isFunctionOrMethod()) {
unsigned IDNS = Decl::IDNS_Ordinary;
- SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (auto *FoundFunction = dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
+ if (auto *FoundFunction =
+ dyn_cast<FunctionTemplateDecl>(FoundDecl)) {
if (FoundFunction->hasExternalFormalLinkage() &&
D->hasExternalFormalLinkage()) {
if (IsStructuralMatch(D, FoundFunction)) {
@@ -4865,22 +5501,22 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
}
}
}
+ // TODO: handle conflicting names
}
}
- TemplateParameterList *Params =
- ImportTemplateParameterList(D->getTemplateParameters());
- if (!Params)
- return nullptr;
+ auto ParamsOrErr = ImportTemplateParameterList(
+ D->getTemplateParameters());
+ if (!ParamsOrErr)
+ return ParamsOrErr.takeError();
- auto *TemplatedFD =
- cast_or_null<FunctionDecl>(Importer.Import(D->getTemplatedDecl()));
- if (!TemplatedFD)
- return nullptr;
+ FunctionDecl *TemplatedFD;
+ if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl()))
+ return std::move(Err);
FunctionTemplateDecl *ToFunc;
if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
- Params, TemplatedFD))
+ *ParamsOrErr, TemplatedFD))
return ToFunc;
TemplatedFD->setDescribedFunctionTemplate(ToFunc);
@@ -4895,2073 +5531,2189 @@ Decl *ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// Import Statements
//----------------------------------------------------------------------------
-DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
- if (DG.isNull())
- return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
- size_t NumDecls = DG.end() - DG.begin();
- SmallVector<Decl *, 1> ToDecls(NumDecls);
- auto &_Importer = this->Importer;
- std::transform(DG.begin(), DG.end(), ToDecls.begin(),
- [&_Importer](Decl *D) -> Decl * {
- return _Importer.Import(D);
- });
- return DeclGroupRef::Create(Importer.getToContext(),
- ToDecls.begin(),
- NumDecls);
+ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) {
+ Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node)
+ << S->getStmtClassName();
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
- Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
- << S->getStmtClassName();
- return nullptr;
-}
-Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
+ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
// ToII is nullptr when no symbolic name is given for output operand
// see ParseStmtAsm::ParseAsmOperandsOpt
- if (!ToII && S->getOutputIdentifier(I))
- return nullptr;
Names.push_back(ToII);
}
+
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getInputIdentifier(I));
// ToII is nullptr when no symbolic name is given for input operand
// see ParseStmtAsm::ParseAsmOperandsOpt
- if (!ToII && S->getInputIdentifier(I))
- return nullptr;
Names.push_back(ToII);
}
SmallVector<StringLiteral *, 4> Clobbers;
for (unsigned I = 0, E = S->getNumClobbers(); I != E; I++) {
- auto *Clobber = cast_or_null<StringLiteral>(
- Importer.Import(S->getClobberStringLiteral(I)));
- if (!Clobber)
- return nullptr;
- Clobbers.push_back(Clobber);
+ if (auto ClobberOrErr = import(S->getClobberStringLiteral(I)))
+ Clobbers.push_back(*ClobberOrErr);
+ else
+ return ClobberOrErr.takeError();
}
SmallVector<StringLiteral *, 4> Constraints;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
- auto *Output = cast_or_null<StringLiteral>(
- Importer.Import(S->getOutputConstraintLiteral(I)));
- if (!Output)
- return nullptr;
- Constraints.push_back(Output);
+ if (auto OutputOrErr = import(S->getOutputConstraintLiteral(I)))
+ Constraints.push_back(*OutputOrErr);
+ else
+ return OutputOrErr.takeError();
}
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
- auto *Input = cast_or_null<StringLiteral>(
- Importer.Import(S->getInputConstraintLiteral(I)));
- if (!Input)
- return nullptr;
- Constraints.push_back(Input);
+ if (auto InputOrErr = import(S->getInputConstraintLiteral(I)))
+ Constraints.push_back(*InputOrErr);
+ else
+ return InputOrErr.takeError();
}
SmallVector<Expr *, 4> Exprs(S->getNumOutputs() + S->getNumInputs());
- if (ImportContainerChecked(S->outputs(), Exprs))
- return nullptr;
-
- if (ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs()))
- return nullptr;
-
- auto *AsmStr = cast_or_null<StringLiteral>(
- Importer.Import(S->getAsmString()));
- if (!AsmStr)
- return nullptr;
+ if (Error Err = ImportContainerChecked(S->outputs(), Exprs))
+ return std::move(Err);
+
+ if (Error Err = ImportArrayChecked(
+ S->inputs(), Exprs.begin() + S->getNumOutputs()))
+ return std::move(Err);
+
+ ExpectedSLoc AsmLocOrErr = import(S->getAsmLoc());
+ if (!AsmLocOrErr)
+ return AsmLocOrErr.takeError();
+ auto AsmStrOrErr = import(S->getAsmString());
+ if (!AsmStrOrErr)
+ return AsmStrOrErr.takeError();
+ ExpectedSLoc RParenLocOrErr = import(S->getRParenLoc());
+ if (!RParenLocOrErr)
+ return RParenLocOrErr.takeError();
return new (Importer.getToContext()) GCCAsmStmt(
- Importer.getToContext(),
- Importer.Import(S->getAsmLoc()),
- S->isSimple(),
- S->isVolatile(),
- S->getNumOutputs(),
- S->getNumInputs(),
- Names.data(),
- Constraints.data(),
- Exprs.data(),
- AsmStr,
- S->getNumClobbers(),
- Clobbers.data(),
- Importer.Import(S->getRParenLoc()));
-}
-
-Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
- DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
- for (auto *ToD : ToDG) {
- if (!ToD)
- return nullptr;
- }
- SourceLocation ToStartLoc = Importer.Import(S->getStartLoc());
- SourceLocation ToEndLoc = Importer.Import(S->getEndLoc());
- return new (Importer.getToContext()) DeclStmt(ToDG, ToStartLoc, ToEndLoc);
-}
+ Importer.getToContext(),
+ *AsmLocOrErr,
+ S->isSimple(),
+ S->isVolatile(),
+ S->getNumOutputs(),
+ S->getNumInputs(),
+ Names.data(),
+ Constraints.data(),
+ Exprs.data(),
+ *AsmStrOrErr,
+ S->getNumClobbers(),
+ Clobbers.data(),
+ *RParenLocOrErr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
+ auto Imp = importSeq(S->getDeclGroup(), S->getBeginLoc(), S->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ DeclGroupRef ToDG;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToDG, ToBeginLoc, ToEndLoc) = *Imp;
+
+ return new (Importer.getToContext()) DeclStmt(ToDG, ToBeginLoc, ToEndLoc);
+}
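// --- Illustrative sketch (editor's addition, not part of this patch) ---
// VisitDeclStmt above bundles several imports through importSeq() and then
// unpacks the resulting tuple with std::tie. importPairSketch() below is a
// hypothetical two-value stand-in for importSeq; only llvm::Expected and
// llvm::StringError are assumed.
#include <string>
#include <tuple>
#include "llvm/Support/Error.h"

static llvm::Expected<std::tuple<int, std::string>>
importPairSketch(bool Fail) {
  if (Fail)
    return llvm::make_error<llvm::StringError>("nested import failed",
                                               llvm::inconvertibleErrorCode());
  return std::make_tuple(1, std::string("imported"));
}

static llvm::Error useImportedPairSketch() {
  auto Imp = importPairSketch(/*Fail=*/false);
  if (!Imp)
    return Imp.takeError(); // the first failure aborts the whole sequence

  int ToFirst;
  std::string ToSecond;
  std::tie(ToFirst, ToSecond) = *Imp; // unpack every imported value at once
  (void)ToFirst;
  (void)ToSecond;
  return llvm::Error::success();
}
// --- end of sketch ---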
+
+ExpectedStmt ASTNodeImporter::VisitNullStmt(NullStmt *S) {
+ ExpectedSLoc ToSemiLocOrErr = import(S->getSemiLoc());
+ if (!ToSemiLocOrErr)
+ return ToSemiLocOrErr.takeError();
+ return new (Importer.getToContext()) NullStmt(
+ *ToSemiLocOrErr, S->hasLeadingEmptyMacro());
+}
+
+ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
+ SmallVector<Stmt *, 8> ToStmts(S->size());
-Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
- SourceLocation ToSemiLoc = Importer.Import(S->getSemiLoc());
- return new (Importer.getToContext()) NullStmt(ToSemiLoc,
- S->hasLeadingEmptyMacro());
-}
+ if (Error Err = ImportContainerChecked(S->body(), ToStmts))
+ return std::move(Err);
-Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
- SmallVector<Stmt *, 8> ToStmts(S->size());
+ ExpectedSLoc ToLBracLocOrErr = import(S->getLBracLoc());
+ if (!ToLBracLocOrErr)
+ return ToLBracLocOrErr.takeError();
- if (ImportContainerChecked(S->body(), ToStmts))
- return nullptr;
+ ExpectedSLoc ToRBracLocOrErr = import(S->getRBracLoc());
+ if (!ToRBracLocOrErr)
+ return ToRBracLocOrErr.takeError();
- SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
- SourceLocation ToRBraceLoc = Importer.Import(S->getRBracLoc());
- return CompoundStmt::Create(Importer.getToContext(), ToStmts, ToLBraceLoc,
- ToRBraceLoc);
+ return CompoundStmt::Create(
+ Importer.getToContext(), ToStmts,
+ *ToLBracLocOrErr, *ToRBracLocOrErr);
}
-Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
- Expr *ToLHS = Importer.Import(S->getLHS());
- if (!ToLHS)
- return nullptr;
- Expr *ToRHS = Importer.Import(S->getRHS());
- if (!ToRHS && S->getRHS())
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
- SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- auto *ToStmt = new (Importer.getToContext())
- CaseStmt(ToLHS, ToRHS, ToCaseLoc, ToEllipsisLoc, ToColonLoc);
+ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
+ auto Imp = importSeq(
+ S->getLHS(), S->getRHS(), S->getSubStmt(), S->getCaseLoc(),
+ S->getEllipsisLoc(), S->getColonLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToLHS, *ToRHS;
+ Stmt *ToSubStmt;
+ SourceLocation ToCaseLoc, ToEllipsisLoc, ToColonLoc;
+ std::tie(ToLHS, ToRHS, ToSubStmt, ToCaseLoc, ToEllipsisLoc, ToColonLoc) =
+ *Imp;
+
+ auto *ToStmt = CaseStmt::Create(Importer.getToContext(), ToLHS, ToRHS,
+ ToCaseLoc, ToEllipsisLoc, ToColonLoc);
ToStmt->setSubStmt(ToSubStmt);
+
return ToStmt;
}
-Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
- SourceLocation ToDefaultLoc = Importer.Import(S->getDefaultLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) DefaultStmt(ToDefaultLoc, ToColonLoc,
- ToSubStmt);
-}
+ExpectedStmt ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
+ auto Imp = importSeq(S->getDefaultLoc(), S->getColonLoc(), S->getSubStmt());
+ if (!Imp)
+ return Imp.takeError();
-Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
- SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
- auto *ToLabelDecl = cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
- if (!ToLabelDecl && S->getDecl())
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) LabelStmt(ToIdentLoc, ToLabelDecl,
- ToSubStmt);
-}
+ SourceLocation ToDefaultLoc, ToColonLoc;
+ Stmt *ToSubStmt;
+ std::tie(ToDefaultLoc, ToColonLoc, ToSubStmt) = *Imp;
-Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
- SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
- ArrayRef<const Attr*> FromAttrs(S->getAttrs());
- SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
- if (ImportContainerChecked(FromAttrs, ToAttrs))
- return nullptr;
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return AttributedStmt::Create(Importer.getToContext(), ToAttrLoc,
- ToAttrs, ToSubStmt);
+ return new (Importer.getToContext()) DefaultStmt(
+ ToDefaultLoc, ToColonLoc, ToSubStmt);
}
-Stmt *ASTNodeImporter::VisitIfStmt(IfStmt *S) {
- SourceLocation ToIfLoc = Importer.Import(S->getIfLoc());
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- Stmt *ToThenStmt = Importer.Import(S->getThen());
- if (!ToThenStmt && S->getThen())
- return nullptr;
- SourceLocation ToElseLoc = Importer.Import(S->getElseLoc());
- Stmt *ToElseStmt = Importer.Import(S->getElse());
- if (!ToElseStmt && S->getElse())
- return nullptr;
- return new (Importer.getToContext()) IfStmt(Importer.getToContext(),
- ToIfLoc, S->isConstexpr(),
- ToInit,
- ToConditionVariable,
- ToCondition, ToThenStmt,
- ToElseLoc, ToElseStmt);
+ExpectedStmt ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
+ auto Imp = importSeq(S->getIdentLoc(), S->getDecl(), S->getSubStmt());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToIdentLoc;
+ LabelDecl *ToLabelDecl;
+ Stmt *ToSubStmt;
+ std::tie(ToIdentLoc, ToLabelDecl, ToSubStmt) = *Imp;
+
+ return new (Importer.getToContext()) LabelStmt(
+ ToIdentLoc, ToLabelDecl, ToSubStmt);
}
-Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- auto *ToStmt = new (Importer.getToContext()) SwitchStmt(
- Importer.getToContext(), ToInit,
- ToConditionVariable, ToCondition);
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
+ ExpectedSLoc ToAttrLocOrErr = import(S->getAttrLoc());
+ if (!ToAttrLocOrErr)
+ return ToAttrLocOrErr.takeError();
+ ArrayRef<const Attr*> FromAttrs(S->getAttrs());
+ SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
+ if (Error Err = ImportContainerChecked(FromAttrs, ToAttrs))
+ return std::move(Err);
+ ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt());
+ if (!ToSubStmtOrErr)
+ return ToSubStmtOrErr.takeError();
+
+ return AttributedStmt::Create(
+ Importer.getToContext(), *ToAttrLocOrErr, ToAttrs, *ToSubStmtOrErr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
+ auto Imp = importSeq(
+ S->getIfLoc(), S->getInit(), S->getConditionVariable(), S->getCond(),
+ S->getThen(), S->getElseLoc(), S->getElse());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToIfLoc, ToElseLoc;
+ Stmt *ToInit, *ToThen, *ToElse;
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ std::tie(
+ ToIfLoc, ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc, ToElse) =
+ *Imp;
+
+ return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
+ ToInit, ToConditionVariable, ToCond, ToThen, ToElseLoc,
+ ToElse);
+}
+
+ExpectedStmt ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
+ auto Imp = importSeq(
+ S->getInit(), S->getConditionVariable(), S->getCond(),
+ S->getBody(), S->getSwitchLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToInit, *ToBody;
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ SourceLocation ToSwitchLoc;
+ std::tie(ToInit, ToConditionVariable, ToCond, ToBody, ToSwitchLoc) = *Imp;
+
+ auto *ToStmt = SwitchStmt::Create(Importer.getToContext(), ToInit,
+ ToConditionVariable, ToCond);
ToStmt->setBody(ToBody);
- ToStmt->setSwitchLoc(Importer.Import(S->getSwitchLoc()));
+ ToStmt->setSwitchLoc(ToSwitchLoc);
+
// Now we have to re-chain the cases.
SwitchCase *LastChainedSwitchCase = nullptr;
for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
SC = SC->getNextSwitchCase()) {
- auto *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
- if (!ToSC)
- return nullptr;
+ Expected<SwitchCase *> ToSCOrErr = import(SC);
+ if (!ToSCOrErr)
+ return ToSCOrErr.takeError();
if (LastChainedSwitchCase)
- LastChainedSwitchCase->setNextSwitchCase(ToSC);
+ LastChainedSwitchCase->setNextSwitchCase(*ToSCOrErr);
else
- ToStmt->setSwitchCaseList(ToSC);
- LastChainedSwitchCase = ToSC;
+ ToStmt->setSwitchCaseList(*ToSCOrErr);
+ LastChainedSwitchCase = *ToSCOrErr;
}
+
return ToStmt;
}
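// --- Illustrative sketch (editor's addition, not part of this patch) ---
// VisitSwitchStmt above has to re-chain the cases because each SwitchCase is
// imported in isolation, so its intrusive next-pointer is not carried over.
// CaseNode and importCaseSketch() are hypothetical stand-ins that only
// illustrate rebuilding the singly linked list on the "To" side.
struct CaseNode {
  CaseNode *Next = nullptr;
};

static CaseNode *importCaseSketch(const CaseNode *) {
  return new CaseNode(); // imported nodes arrive unlinked
}

static CaseNode *rebuildCaseChainSketch(const CaseNode *FromHead) {
  CaseNode *ToHead = nullptr;
  CaseNode *Last = nullptr;
  for (const CaseNode *From = FromHead; From; From = From->Next) {
    CaseNode *To = importCaseSketch(From);
    if (Last)
      Last->Next = To; // append to the rebuilt chain
    else
      ToHead = To;     // the first imported case becomes the new head
    Last = To;
  }
  return ToHead;       // the caller owns the newly allocated nodes
}
// --- end of sketch ---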
-Stmt *ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
- return new (Importer.getToContext()) WhileStmt(Importer.getToContext(),
- ToConditionVariable,
- ToCondition, ToBody,
- ToWhileLoc);
+ExpectedStmt ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
+ auto Imp = importSeq(
+ S->getConditionVariable(), S->getCond(), S->getBody(), S->getWhileLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ VarDecl *ToConditionVariable;
+ Expr *ToCond;
+ Stmt *ToBody;
+ SourceLocation ToWhileLoc;
+ std::tie(ToConditionVariable, ToCond, ToBody, ToWhileLoc) = *Imp;
+
+ return WhileStmt::Create(Importer.getToContext(), ToConditionVariable, ToCond,
+ ToBody, ToWhileLoc);
}
-Stmt *ASTNodeImporter::VisitDoStmt(DoStmt *S) {
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- SourceLocation ToDoLoc = Importer.Import(S->getDoLoc());
- SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) DoStmt(ToBody, ToCondition,
- ToDoLoc, ToWhileLoc,
- ToRParenLoc);
+ExpectedStmt ASTNodeImporter::VisitDoStmt(DoStmt *S) {
+ auto Imp = importSeq(
+ S->getBody(), S->getCond(), S->getDoLoc(), S->getWhileLoc(),
+ S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToBody;
+ Expr *ToCond;
+ SourceLocation ToDoLoc, ToWhileLoc, ToRParenLoc;
+ std::tie(ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) DoStmt(
+ ToBody, ToCond, ToDoLoc, ToWhileLoc, ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitForStmt(ForStmt *S) {
- Stmt *ToInit = Importer.Import(S->getInit());
- if (!ToInit && S->getInit())
- return nullptr;
- Expr *ToCondition = Importer.Import(S->getCond());
- if (!ToCondition && S->getCond())
- return nullptr;
- VarDecl *ToConditionVariable = nullptr;
- if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
- ToConditionVariable =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
- if (!ToConditionVariable)
- return nullptr;
- }
- Expr *ToInc = Importer.Import(S->getInc());
- if (!ToInc && S->getInc())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToLParenLoc = Importer.Import(S->getLParenLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) ForStmt(Importer.getToContext(),
- ToInit, ToCondition,
- ToConditionVariable,
- ToInc, ToBody,
- ToForLoc, ToLParenLoc,
- ToRParenLoc);
-}
-
-Stmt *ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
- LabelDecl *ToLabel = nullptr;
- if (LabelDecl *FromLabel = S->getLabel()) {
- ToLabel = dyn_cast_or_null<LabelDecl>(Importer.Import(FromLabel));
- if (!ToLabel)
- return nullptr;
- }
- SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
- SourceLocation ToLabelLoc = Importer.Import(S->getLabelLoc());
- return new (Importer.getToContext()) GotoStmt(ToLabel,
- ToGotoLoc, ToLabelLoc);
+ExpectedStmt ASTNodeImporter::VisitForStmt(ForStmt *S) {
+ auto Imp = importSeq(
+ S->getInit(), S->getCond(), S->getConditionVariable(), S->getInc(),
+ S->getBody(), S->getForLoc(), S->getLParenLoc(), S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToInit;
+ Expr *ToCond, *ToInc;
+ VarDecl *ToConditionVariable;
+ Stmt *ToBody;
+ SourceLocation ToForLoc, ToLParenLoc, ToRParenLoc;
+ std::tie(
+ ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc,
+ ToLParenLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) ForStmt(
+ Importer.getToContext(),
+ ToInit, ToCond, ToConditionVariable, ToInc, ToBody, ToForLoc, ToLParenLoc,
+ ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
- SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
- SourceLocation ToStarLoc = Importer.Import(S->getStarLoc());
- Expr *ToTarget = Importer.Import(S->getTarget());
- if (!ToTarget && S->getTarget())
- return nullptr;
- return new (Importer.getToContext()) IndirectGotoStmt(ToGotoLoc, ToStarLoc,
- ToTarget);
+ExpectedStmt ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
+ auto Imp = importSeq(S->getLabel(), S->getGotoLoc(), S->getLabelLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ LabelDecl *ToLabel;
+ SourceLocation ToGotoLoc, ToLabelLoc;
+ std::tie(ToLabel, ToGotoLoc, ToLabelLoc) = *Imp;
+
+ return new (Importer.getToContext()) GotoStmt(
+ ToLabel, ToGotoLoc, ToLabelLoc);
}
-Stmt *ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
- SourceLocation ToContinueLoc = Importer.Import(S->getContinueLoc());
- return new (Importer.getToContext()) ContinueStmt(ToContinueLoc);
+ExpectedStmt ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ auto Imp = importSeq(S->getGotoLoc(), S->getStarLoc(), S->getTarget());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToGotoLoc, ToStarLoc;
+ Expr *ToTarget;
+ std::tie(ToGotoLoc, ToStarLoc, ToTarget) = *Imp;
+
+ return new (Importer.getToContext()) IndirectGotoStmt(
+ ToGotoLoc, ToStarLoc, ToTarget);
}
-Stmt *ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
- SourceLocation ToBreakLoc = Importer.Import(S->getBreakLoc());
- return new (Importer.getToContext()) BreakStmt(ToBreakLoc);
+ExpectedStmt ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
+ ExpectedSLoc ToContinueLocOrErr = import(S->getContinueLoc());
+ if (!ToContinueLocOrErr)
+ return ToContinueLocOrErr.takeError();
+ return new (Importer.getToContext()) ContinueStmt(*ToContinueLocOrErr);
}
-Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
- SourceLocation ToRetLoc = Importer.Import(S->getReturnLoc());
- Expr *ToRetExpr = Importer.Import(S->getRetValue());
- if (!ToRetExpr && S->getRetValue())
- return nullptr;
- auto *NRVOCandidate = const_cast<VarDecl *>(S->getNRVOCandidate());
- auto *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
- if (!ToNRVOCandidate && NRVOCandidate)
- return nullptr;
- return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
- ToNRVOCandidate);
+ExpectedStmt ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
+ auto ToBreakLocOrErr = import(S->getBreakLoc());
+ if (!ToBreakLocOrErr)
+ return ToBreakLocOrErr.takeError();
+ return new (Importer.getToContext()) BreakStmt(*ToBreakLocOrErr);
}
-Stmt *ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
- SourceLocation ToCatchLoc = Importer.Import(S->getCatchLoc());
- VarDecl *ToExceptionDecl = nullptr;
- if (VarDecl *FromExceptionDecl = S->getExceptionDecl()) {
- ToExceptionDecl =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
- if (!ToExceptionDecl)
- return nullptr;
- }
- Stmt *ToHandlerBlock = Importer.Import(S->getHandlerBlock());
- if (!ToHandlerBlock && S->getHandlerBlock())
- return nullptr;
- return new (Importer.getToContext()) CXXCatchStmt(ToCatchLoc,
- ToExceptionDecl,
- ToHandlerBlock);
+ExpectedStmt ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
+ auto Imp = importSeq(
+ S->getReturnLoc(), S->getRetValue(), S->getNRVOCandidate());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToReturnLoc;
+ Expr *ToRetValue;
+ const VarDecl *ToNRVOCandidate;
+ std::tie(ToReturnLoc, ToRetValue, ToNRVOCandidate) = *Imp;
+
+ return ReturnStmt::Create(Importer.getToContext(), ToReturnLoc, ToRetValue,
+ ToNRVOCandidate);
}
-Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
- SourceLocation ToTryLoc = Importer.Import(S->getTryLoc());
- Stmt *ToTryBlock = Importer.Import(S->getTryBlock());
- if (!ToTryBlock && S->getTryBlock())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ auto Imp = importSeq(
+ S->getCatchLoc(), S->getExceptionDecl(), S->getHandlerBlock());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToCatchLoc;
+ VarDecl *ToExceptionDecl;
+ Stmt *ToHandlerBlock;
+ std::tie(ToCatchLoc, ToExceptionDecl, ToHandlerBlock) = *Imp;
+
+ return new (Importer.getToContext()) CXXCatchStmt(
+ ToCatchLoc, ToExceptionDecl, ToHandlerBlock);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
+ ExpectedSLoc ToTryLocOrErr = import(S->getTryLoc());
+ if (!ToTryLocOrErr)
+ return ToTryLocOrErr.takeError();
+
+ ExpectedStmt ToTryBlockOrErr = import(S->getTryBlock());
+ if (!ToTryBlockOrErr)
+ return ToTryBlockOrErr.takeError();
+
SmallVector<Stmt *, 1> ToHandlers(S->getNumHandlers());
for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) {
CXXCatchStmt *FromHandler = S->getHandler(HI);
- if (Stmt *ToHandler = Importer.Import(FromHandler))
- ToHandlers[HI] = ToHandler;
+ if (auto ToHandlerOrErr = import(FromHandler))
+ ToHandlers[HI] = *ToHandlerOrErr;
else
- return nullptr;
- }
- return CXXTryStmt::Create(Importer.getToContext(), ToTryLoc, ToTryBlock,
- ToHandlers);
-}
-
-Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
- auto *ToRange =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
- if (!ToRange && S->getRangeStmt())
- return nullptr;
- auto *ToBegin =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginStmt()));
- if (!ToBegin && S->getBeginStmt())
- return nullptr;
- auto *ToEnd =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getEndStmt()));
- if (!ToEnd && S->getEndStmt())
- return nullptr;
- Expr *ToCond = Importer.Import(S->getCond());
- if (!ToCond && S->getCond())
- return nullptr;
- Expr *ToInc = Importer.Import(S->getInc());
- if (!ToInc && S->getInc())
- return nullptr;
- auto *ToLoopVar =
- dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
- if (!ToLoopVar && S->getLoopVarStmt())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToCoawaitLoc = Importer.Import(S->getCoawaitLoc());
- SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) CXXForRangeStmt(ToRange, ToBegin, ToEnd,
- ToCond, ToInc,
- ToLoopVar, ToBody,
- ToForLoc, ToCoawaitLoc,
- ToColonLoc, ToRParenLoc);
-}
-
-Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
- Stmt *ToElem = Importer.Import(S->getElement());
- if (!ToElem && S->getElement())
- return nullptr;
- Expr *ToCollect = Importer.Import(S->getCollection());
- if (!ToCollect && S->getCollection())
- return nullptr;
- Stmt *ToBody = Importer.Import(S->getBody());
- if (!ToBody && S->getBody())
- return nullptr;
- SourceLocation ToForLoc = Importer.Import(S->getForLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- return new (Importer.getToContext()) ObjCForCollectionStmt(ToElem,
- ToCollect,
- ToBody, ToForLoc,
+ return ToHandlerOrErr.takeError();
+ }
+
+ return CXXTryStmt::Create(
+ Importer.getToContext(), *ToTryLocOrErr, *ToTryBlockOrErr, ToHandlers);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ auto Imp1 = importSeq(
+ S->getInit(), S->getRangeStmt(), S->getBeginStmt(), S->getEndStmt(),
+ S->getCond(), S->getInc(), S->getLoopVarStmt(), S->getBody());
+ if (!Imp1)
+ return Imp1.takeError();
+ auto Imp2 = importSeq(
+ S->getForLoc(), S->getCoawaitLoc(), S->getColonLoc(), S->getRParenLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+
+ DeclStmt *ToRangeStmt, *ToBeginStmt, *ToEndStmt, *ToLoopVarStmt;
+ Expr *ToCond, *ToInc;
+ Stmt *ToInit, *ToBody;
+ std::tie(
+ ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
+ ToBody) = *Imp1;
+ SourceLocation ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc;
+ std::tie(ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc) = *Imp2;
+
+ return new (Importer.getToContext()) CXXForRangeStmt(
+ ToInit, ToRangeStmt, ToBeginStmt, ToEndStmt, ToCond, ToInc, ToLoopVarStmt,
+ ToBody, ToForLoc, ToCoawaitLoc, ToColonLoc, ToRParenLoc);
+}
+
+ExpectedStmt
+ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ auto Imp = importSeq(
+ S->getElement(), S->getCollection(), S->getBody(),
+ S->getForLoc(), S->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ Stmt *ToElement, *ToBody;
+ Expr *ToCollection;
+ SourceLocation ToForLoc, ToRParenLoc;
+ std::tie(ToElement, ToCollection, ToBody, ToForLoc, ToRParenLoc) = *Imp;
+
+ return new (Importer.getToContext()) ObjCForCollectionStmt(ToElement,
+ ToCollection,
+ ToBody,
+ ToForLoc,
ToRParenLoc);
}
-Stmt *ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- SourceLocation ToAtCatchLoc = Importer.Import(S->getAtCatchLoc());
- SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
- VarDecl *ToExceptionDecl = nullptr;
- if (VarDecl *FromExceptionDecl = S->getCatchParamDecl()) {
- ToExceptionDecl =
- dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
- if (!ToExceptionDecl)
- return nullptr;
- }
- Stmt *ToBody = Importer.Import(S->getCatchBody());
- if (!ToBody && S->getCatchBody())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtCatchStmt(ToAtCatchLoc,
- ToRParenLoc,
- ToExceptionDecl,
- ToBody);
+ExpectedStmt ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ auto Imp = importSeq(
+ S->getAtCatchLoc(), S->getRParenLoc(), S->getCatchParamDecl(),
+ S->getCatchBody());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToAtCatchLoc, ToRParenLoc;
+ VarDecl *ToCatchParamDecl;
+ Stmt *ToCatchBody;
+ std::tie(ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody) = *Imp;
+
+ return new (Importer.getToContext()) ObjCAtCatchStmt(
+ ToAtCatchLoc, ToRParenLoc, ToCatchParamDecl, ToCatchBody);
}
-Stmt *ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
- SourceLocation ToAtFinallyLoc = Importer.Import(S->getAtFinallyLoc());
- Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyBody());
- if (!ToAtFinallyStmt && S->getFinallyBody())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtFinallyStmt(ToAtFinallyLoc,
- ToAtFinallyStmt);
+ExpectedStmt ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ ExpectedSLoc ToAtFinallyLocOrErr = import(S->getAtFinallyLoc());
+ if (!ToAtFinallyLocOrErr)
+ return ToAtFinallyLocOrErr.takeError();
+ ExpectedStmt ToAtFinallyStmtOrErr = import(S->getFinallyBody());
+ if (!ToAtFinallyStmtOrErr)
+ return ToAtFinallyStmtOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAtFinallyStmt(*ToAtFinallyLocOrErr,
+ *ToAtFinallyStmtOrErr);
}
-Stmt *ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
- SourceLocation ToAtTryLoc = Importer.Import(S->getAtTryLoc());
- Stmt *ToAtTryStmt = Importer.Import(S->getTryBody());
- if (!ToAtTryStmt && S->getTryBody())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ auto Imp = importSeq(
+ S->getAtTryLoc(), S->getTryBody(), S->getFinallyStmt());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToAtTryLoc;
+ Stmt *ToTryBody, *ToFinallyStmt;
+ std::tie(ToAtTryLoc, ToTryBody, ToFinallyStmt) = *Imp;
+
SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI);
- if (Stmt *ToCatchStmt = Importer.Import(FromCatchStmt))
- ToCatchStmts[CI] = ToCatchStmt;
+ if (ExpectedStmt ToCatchStmtOrErr = import(FromCatchStmt))
+ ToCatchStmts[CI] = *ToCatchStmtOrErr;
else
- return nullptr;
+ return ToCatchStmtOrErr.takeError();
}
- Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyStmt());
- if (!ToAtFinallyStmt && S->getFinallyStmt())
- return nullptr;
+
return ObjCAtTryStmt::Create(Importer.getToContext(),
- ToAtTryLoc, ToAtTryStmt,
+ ToAtTryLoc, ToTryBody,
ToCatchStmts.begin(), ToCatchStmts.size(),
- ToAtFinallyStmt);
+ ToFinallyStmt);
}
-Stmt *ASTNodeImporter::VisitObjCAtSynchronizedStmt
+ExpectedStmt ASTNodeImporter::VisitObjCAtSynchronizedStmt
(ObjCAtSynchronizedStmt *S) {
- SourceLocation ToAtSynchronizedLoc =
- Importer.Import(S->getAtSynchronizedLoc());
- Expr *ToSynchExpr = Importer.Import(S->getSynchExpr());
- if (!ToSynchExpr && S->getSynchExpr())
- return nullptr;
- Stmt *ToSynchBody = Importer.Import(S->getSynchBody());
- if (!ToSynchBody && S->getSynchBody())
- return nullptr;
+ auto Imp = importSeq(
+ S->getAtSynchronizedLoc(), S->getSynchExpr(), S->getSynchBody());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToAtSynchronizedLoc;
+ Expr *ToSynchExpr;
+ Stmt *ToSynchBody;
+ std::tie(ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody) = *Imp;
+
return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
}
-Stmt *ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
- SourceLocation ToAtThrowLoc = Importer.Import(S->getThrowLoc());
- Expr *ToThrow = Importer.Import(S->getThrowExpr());
- if (!ToThrow && S->getThrowExpr())
- return nullptr;
- return new (Importer.getToContext()) ObjCAtThrowStmt(ToAtThrowLoc, ToThrow);
+ExpectedStmt ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ ExpectedSLoc ToThrowLocOrErr = import(S->getThrowLoc());
+ if (!ToThrowLocOrErr)
+ return ToThrowLocOrErr.takeError();
+ ExpectedExpr ToThrowExprOrErr = import(S->getThrowExpr());
+ if (!ToThrowExprOrErr)
+ return ToThrowExprOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAtThrowStmt(
+ *ToThrowLocOrErr, *ToThrowExprOrErr);
}
-Stmt *ASTNodeImporter::VisitObjCAutoreleasePoolStmt
- (ObjCAutoreleasePoolStmt *S) {
- SourceLocation ToAtLoc = Importer.Import(S->getAtLoc());
- Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
- if (!ToSubStmt && S->getSubStmt())
- return nullptr;
- return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(ToAtLoc,
- ToSubStmt);
+ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt(
+ ObjCAutoreleasePoolStmt *S) {
+ ExpectedSLoc ToAtLocOrErr = import(S->getAtLoc());
+ if (!ToAtLocOrErr)
+ return ToAtLocOrErr.takeError();
+ ExpectedStmt ToSubStmtOrErr = import(S->getSubStmt());
+ if (!ToSubStmtOrErr)
+ return ToSubStmtOrErr.takeError();
+ return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(*ToAtLocOrErr,
+ *ToSubStmtOrErr);
}
//----------------------------------------------------------------------------
// Import Expressions
//----------------------------------------------------------------------------
-Expr *ASTNodeImporter::VisitExpr(Expr *E) {
- Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node)
- << E->getStmtClassName();
- return nullptr;
-}
-
-Expr *ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr && E->getSubExpr())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(E->getWrittenTypeInfo());
- if (!TInfo)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
+ Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node)
+ << E->getStmtClassName();
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
+}
+
+ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
+ auto Imp = importSeq(
+ E->getBuiltinLoc(), E->getSubExpr(), E->getWrittenTypeInfo(),
+ E->getRParenLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToBuiltinLoc, ToRParenLoc;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToWrittenTypeInfo;
+ QualType ToType;
+ std::tie(ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType) =
+ *Imp;
return new (Importer.getToContext()) VAArgExpr(
- Importer.Import(E->getBuiltinLoc()), SubExpr, TInfo,
- Importer.Import(E->getRParenLoc()), T, E->isMicrosoftABI());
+ ToBuiltinLoc, ToSubExpr, ToWrittenTypeInfo, ToRParenLoc, ToType,
+ E->isMicrosoftABI());
}
-Expr *ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
- return new (Importer.getToContext()) GNUNullExpr(
- T, Importer.Import(E->getLocStart()));
+ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
+ ExpectedType TypeOrErr = import(E->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
+
+ ExpectedSLoc BeginLocOrErr = import(E->getBeginLoc());
+ if (!BeginLocOrErr)
+ return BeginLocOrErr.takeError();
+
+ return new (Importer.getToContext()) GNUNullExpr(*TypeOrErr, *BeginLocOrErr);
}
-Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getType(), E->getFunctionName());
+ if (!Imp)
+ return Imp.takeError();
- auto *SL = cast_or_null<StringLiteral>(Importer.Import(E->getFunctionName()));
- if (!SL && E->getFunctionName())
- return nullptr;
+ SourceLocation ToBeginLoc;
+ QualType ToType;
+ StringLiteral *ToFunctionName;
+ std::tie(ToBeginLoc, ToType, ToFunctionName) = *Imp;
- return new (Importer.getToContext()) PredefinedExpr(
- Importer.Import(E->getLocStart()), T, E->getIdentType(), SL);
+ return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
+ E->getIdentKind(), ToFunctionName);
}
-Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
- auto *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
- if (!ToD)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ auto Imp = importSeq(
+ E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDecl(),
+ E->getLocation(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ NestedNameSpecifierLoc ToQualifierLoc;
+ SourceLocation ToTemplateKeywordLoc, ToLocation;
+ ValueDecl *ToDecl;
+ QualType ToType;
+ std::tie(ToQualifierLoc, ToTemplateKeywordLoc, ToDecl, ToLocation, ToType) =
+ *Imp;
- NamedDecl *FoundD = nullptr;
+ NamedDecl *ToFoundD = nullptr;
if (E->getDecl() != E->getFoundDecl()) {
- FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
- if (!FoundD)
- return nullptr;
+ auto FoundDOrErr = import(E->getFoundDecl());
+ if (!FoundDOrErr)
+ return FoundDOrErr.takeError();
+ ToFoundD = *FoundDOrErr;
}
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
TemplateArgumentListInfo ToTAInfo;
- TemplateArgumentListInfo *ResInfo = nullptr;
+ TemplateArgumentListInfo *ToResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
- ResInfo = &ToTAInfo;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
+ ToResInfo = &ToTAInfo;
}
- DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- ToD,
- E->refersToEnclosingVariableOrCapture(),
- Importer.Import(E->getLocation()),
- T, E->getValueKind(),
- FoundD, ResInfo);
+ auto *ToE = DeclRefExpr::Create(
+ Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc, ToDecl,
+ E->refersToEnclosingVariableOrCapture(), ToLocation, ToType,
+ E->getValueKind(), ToFoundD, ToResInfo);
if (E->hadMultipleCandidates())
- DRE->setHadMultipleCandidates(true);
- return DRE;
-}
-
-Expr *ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- return new (Importer.getToContext()) ImplicitValueInitExpr(T);
+ ToE->setHadMultipleCandidates(true);
+ return ToE;
}
-ASTNodeImporter::Designator
-ASTNodeImporter::ImportDesignator(const Designator &D) {
- if (D.isFieldDesignator()) {
- IdentifierInfo *ToFieldName = Importer.Import(D.getFieldName());
- // Caller checks for import error
- return Designator(ToFieldName, Importer.Import(D.getDotLoc()),
- Importer.Import(D.getFieldLoc()));
- }
- if (D.isArrayDesignator())
- return Designator(D.getFirstExprIndex(),
- Importer.Import(D.getLBracketLoc()),
- Importer.Import(D.getRBracketLoc()));
+ExpectedStmt
+ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ ExpectedType TypeOrErr = import(E->getType());
+ if (!TypeOrErr)
+ return TypeOrErr.takeError();
- assert(D.isArrayRangeDesignator());
- return Designator(D.getFirstExprIndex(),
- Importer.Import(D.getLBracketLoc()),
- Importer.Import(D.getEllipsisLoc()),
- Importer.Import(D.getRBracketLoc()));
+ return new (Importer.getToContext()) ImplicitValueInitExpr(*TypeOrErr);
}
+ExpectedStmt ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ ExpectedExpr ToInitOrErr = import(E->getInit());
+ if (!ToInitOrErr)
+ return ToInitOrErr.takeError();
-Expr *ASTNodeImporter::VisitDesignatedInitExpr(DesignatedInitExpr *DIE) {
- auto *Init = cast_or_null<Expr>(Importer.Import(DIE->getInit()));
- if (!Init)
- return nullptr;
+ ExpectedSLoc ToEqualOrColonLocOrErr = import(E->getEqualOrColonLoc());
+ if (!ToEqualOrColonLocOrErr)
+ return ToEqualOrColonLocOrErr.takeError();
- SmallVector<Expr *, 4> IndexExprs(DIE->getNumSubExprs() - 1);
+ SmallVector<Expr *, 4> ToIndexExprs(E->getNumSubExprs() - 1);
// List elements from the second, the first is Init itself
- for (unsigned I = 1, E = DIE->getNumSubExprs(); I < E; I++) {
- if (auto *Arg = cast_or_null<Expr>(Importer.Import(DIE->getSubExpr(I))))
- IndexExprs[I - 1] = Arg;
+ for (unsigned I = 1, N = E->getNumSubExprs(); I < N; I++) {
+ if (ExpectedExpr ToArgOrErr = import(E->getSubExpr(I)))
+ ToIndexExprs[I - 1] = *ToArgOrErr;
else
- return nullptr;
+ return ToArgOrErr.takeError();
}
- SmallVector<Designator, 4> Designators(DIE->size());
- llvm::transform(DIE->designators(), Designators.begin(),
- [this](const Designator &D) -> Designator {
- return ImportDesignator(D);
- });
-
- for (const auto &D : DIE->designators())
- if (D.isFieldDesignator() && !D.getFieldName())
- return nullptr;
+ SmallVector<Designator, 4> ToDesignators(E->size());
+ if (Error Err = ImportContainerChecked(E->designators(), ToDesignators))
+ return std::move(Err);
return DesignatedInitExpr::Create(
- Importer.getToContext(), Designators,
- IndexExprs, Importer.Import(DIE->getEqualOrColonLoc()),
- DIE->usesGNUSyntax(), Init);
+ Importer.getToContext(), ToDesignators,
+ ToIndexExprs, *ToEqualOrColonLocOrErr,
+ E->usesGNUSyntax(), *ToInitOrErr);
}
-Expr *ASTNodeImporter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return new (Importer.getToContext())
- CXXNullPtrLiteralExpr(T, Importer.Import(E->getLocation()));
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return new (Importer.getToContext()) CXXNullPtrLiteralExpr(
+ *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return IntegerLiteral::Create(Importer.getToContext(),
- E->getValue(), T,
- Importer.Import(E->getLocation()));
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return IntegerLiteral::Create(
+ Importer.getToContext(), E->getValue(), *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitFloatingLiteral(FloatingLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
- return FloatingLiteral::Create(Importer.getToContext(),
- E->getValue(), E->isExact(), T,
- Importer.Import(E->getLocation()));
+ExpectedStmt ASTNodeImporter::VisitFloatingLiteral(FloatingLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
+
+ return FloatingLiteral::Create(
+ Importer.getToContext(), E->getValue(), E->isExact(),
+ *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ auto ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
- E->getKind(), T,
- Importer.Import(E->getLocation()));
+ return new (Importer.getToContext()) ImaginaryLiteral(
+ *ToSubExprOrErr, *ToTypeOrErr);
}
-Expr *ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- SmallVector<SourceLocation, 4> Locations(E->getNumConcatenated());
- ImportArray(E->tokloc_begin(), E->tokloc_end(), Locations.begin());
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
- return StringLiteral::Create(Importer.getToContext(), E->getBytes(),
- E->getKind(), E->isPascal(), T,
- Locations.data(), Locations.size());
+ return new (Importer.getToContext()) CharacterLiteral(
+ E->getValue(), E->getKind(), *ToTypeOrErr, *ToLocationOrErr);
}
-Expr *ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitStringLiteral(StringLiteral *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- TypeSourceInfo *TInfo = Importer.Import(E->getTypeSourceInfo());
- if (!TInfo)
- return nullptr;
+ SmallVector<SourceLocation, 4> ToLocations(E->getNumConcatenated());
+ if (Error Err = ImportArrayChecked(
+ E->tokloc_begin(), E->tokloc_end(), ToLocations.begin()))
+ return std::move(Err);
- Expr *Init = Importer.Import(E->getInitializer());
- if (!Init)
- return nullptr;
+ return StringLiteral::Create(
+ Importer.getToContext(), E->getBytes(), E->getKind(), E->isPascal(),
+ *ToTypeOrErr, ToLocations.data(), ToLocations.size());
+}
+
+ExpectedStmt ASTNodeImporter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ auto Imp = importSeq(
+ E->getLParenLoc(), E->getTypeSourceInfo(), E->getType(),
+ E->getInitializer());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToLParenLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ QualType ToType;
+ Expr *ToInitializer;
+ std::tie(ToLParenLoc, ToTypeSourceInfo, ToType, ToInitializer) = *Imp;
return new (Importer.getToContext()) CompoundLiteralExpr(
- Importer.Import(E->getLParenLoc()), TInfo, T, E->getValueKind(),
- Init, E->isFileScope());
+ ToLParenLoc, ToTypeSourceInfo, ToType, E->getValueKind(),
+ ToInitializer, E->isFileScope());
}
-Expr *ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAtomicExpr(AtomicExpr *E) {
+ auto Imp = importSeq(
+ E->getBuiltinLoc(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- SmallVector<Expr *, 6> Exprs(E->getNumSubExprs());
- if (ImportArrayChecked(
- E->getSubExprs(), E->getSubExprs() + E->getNumSubExprs(),
- Exprs.begin()))
- return nullptr;
+ SourceLocation ToBuiltinLoc, ToRParenLoc;
+ QualType ToType;
+ std::tie(ToBuiltinLoc, ToType, ToRParenLoc) = *Imp;
+
+ SmallVector<Expr *, 6> ToExprs(E->getNumSubExprs());
+ if (Error Err = ImportArrayChecked(
+ E->getSubExprs(), E->getSubExprs() + E->getNumSubExprs(),
+ ToExprs.begin()))
+ return std::move(Err);
return new (Importer.getToContext()) AtomicExpr(
- Importer.Import(E->getBuiltinLoc()), Exprs, T, E->getOp(),
- Importer.Import(E->getRParenLoc()));
+ ToBuiltinLoc, ToExprs, ToType, E->getOp(), ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ auto Imp = importSeq(
+ E->getAmpAmpLoc(), E->getLabelLoc(), E->getLabel(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToLabel = cast_or_null<LabelDecl>(Importer.Import(E->getLabel()));
- if (!ToLabel)
- return nullptr;
+ SourceLocation ToAmpAmpLoc, ToLabelLoc;
+ LabelDecl *ToLabel;
+ QualType ToType;
+ std::tie(ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType) = *Imp;
return new (Importer.getToContext()) AddrLabelExpr(
- Importer.Import(E->getAmpAmpLoc()), Importer.Import(E->getLabelLoc()),
- ToLabel, T);
+ ToAmpAmpLoc, ToLabelLoc, ToLabel, ToType);
}
-Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitConstantExpr(ConstantExpr *E) {
+ auto Imp = importSeq(E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToSubExpr;
+ std::tie(ToSubExpr) = *Imp;
+
+ return ConstantExpr::Create(Importer.getToContext(), ToSubExpr);
+}
+
+ExpectedStmt ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ auto Imp = importSeq(E->getLParen(), E->getRParen(), E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToLParen, ToRParen;
+ Expr *ToSubExpr;
+ std::tie(ToLParen, ToRParen, ToSubExpr) = *Imp;
return new (Importer.getToContext())
- ParenExpr(Importer.Import(E->getLParen()),
- Importer.Import(E->getRParen()),
- SubExpr);
+ ParenExpr(ToLParen, ToRParen, ToSubExpr);
}
-Expr *ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
- SmallVector<Expr *, 4> Exprs(E->getNumExprs());
- if (ImportContainerChecked(E->exprs(), Exprs))
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
+ SmallVector<Expr *, 4> ToExprs(E->getNumExprs());
+ if (Error Err = ImportContainerChecked(E->exprs(), ToExprs))
+ return std::move(Err);
+
+ ExpectedSLoc ToLParenLocOrErr = import(E->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
- return new (Importer.getToContext()) ParenListExpr(
- Importer.getToContext(), Importer.Import(E->getLParenLoc()),
- Exprs, Importer.Import(E->getLParenLoc()));
+ ExpectedSLoc ToRParenLocOrErr = import(E->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+
+ return ParenListExpr::Create(Importer.getToContext(), *ToLParenLocOrErr,
+ ToExprs, *ToRParenLocOrErr);
}
-Expr *ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitStmtExpr(StmtExpr *E) {
+ auto Imp = importSeq(
+ E->getSubStmt(), E->getType(), E->getLParenLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToSubStmt = cast_or_null<CompoundStmt>(
- Importer.Import(E->getSubStmt()));
- if (!ToSubStmt && E->getSubStmt())
- return nullptr;
+ CompoundStmt *ToSubStmt;
+ QualType ToType;
+ SourceLocation ToLParenLoc, ToRParenLoc;
+ std::tie(ToSubStmt, ToType, ToLParenLoc, ToRParenLoc) = *Imp;
- return new (Importer.getToContext()) StmtExpr(ToSubStmt, T,
- Importer.Import(E->getLParenLoc()), Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) StmtExpr(
+ ToSubStmt, ToType, ToLParenLoc, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ auto Imp = importSeq(
+ E->getSubExpr(), E->getType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ Expr *ToSubExpr;
+ QualType ToType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToSubExpr, ToType, ToOperatorLoc) = *Imp;
return new (Importer.getToContext()) UnaryOperator(
- SubExpr, E->getOpcode(), T, E->getValueKind(), E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()), E->canOverflow());
+ ToSubExpr, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(),
+ ToOperatorLoc, E->canOverflow());
}
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
- QualType ResultType = Importer.Import(E->getType());
+ auto Imp = importSeq(E->getType(), E->getOperatorLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ std::tie(ToType, ToOperatorLoc, ToRParenLoc) = *Imp;
if (E->isArgumentType()) {
- TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
- if (!TInfo)
- return nullptr;
+ Expected<TypeSourceInfo *> ToArgumentTypeInfoOrErr =
+ import(E->getArgumentTypeInfo());
+ if (!ToArgumentTypeInfoOrErr)
+ return ToArgumentTypeInfoOrErr.takeError();
- return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
- TInfo, ResultType,
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(
+ E->getKind(), *ToArgumentTypeInfoOrErr, ToType, ToOperatorLoc,
+ ToRParenLoc);
}
- Expr *SubExpr = Importer.Import(E->getArgumentExpr());
- if (!SubExpr)
- return nullptr;
+ ExpectedExpr ToArgumentExprOrErr = import(E->getArgumentExpr());
+ if (!ToArgumentExprOrErr)
+ return ToArgumentExprOrErr.takeError();
- return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
- SubExpr, ResultType,
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getRParenLoc()));
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(
+ E->getKind(), *ToArgumentExprOrErr, ToType, ToOperatorLoc, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *LHS = Importer.Import(E->getLHS());
- if (!LHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ QualType ToType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToLHS, ToRHS, ToType, ToOperatorLoc) = *Imp;
- Expr *RHS = Importer.Import(E->getRHS());
- if (!RHS)
- return nullptr;
-
- return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
- T, E->getValueKind(),
- E->getObjectKind(),
- Importer.Import(E->getOperatorLoc()),
- E->getFPFeatures());
+ return new (Importer.getToContext()) BinaryOperator(
+ ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
+ E->getObjectKind(), ToOperatorLoc, E->getFPFeatures());
}
-Expr *ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *ToLHS = Importer.Import(E->getLHS());
- if (!ToLHS)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
+ auto Imp = importSeq(
+ E->getCond(), E->getQuestionLoc(), E->getLHS(), E->getColonLoc(),
+ E->getRHS(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToRHS = Importer.Import(E->getRHS());
- if (!ToRHS)
- return nullptr;
-
- Expr *ToCond = Importer.Import(E->getCond());
- if (!ToCond)
- return nullptr;
+ Expr *ToCond, *ToLHS, *ToRHS;
+ SourceLocation ToQuestionLoc, ToColonLoc;
+ QualType ToType;
+ std::tie(ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType) = *Imp;
return new (Importer.getToContext()) ConditionalOperator(
- ToCond, Importer.Import(E->getQuestionLoc()),
- ToLHS, Importer.Import(E->getColonLoc()),
- ToRHS, T, E->getValueKind(), E->getObjectKind());
+ ToCond, ToQuestionLoc, ToLHS, ToColonLoc, ToRHS, ToType,
+ E->getValueKind(), E->getObjectKind());
}
-Expr *ASTNodeImporter::VisitBinaryConditionalOperator(
+ExpectedStmt ASTNodeImporter::VisitBinaryConditionalOperator(
BinaryConditionalOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *Common = Importer.Import(E->getCommon());
- if (!Common)
- return nullptr;
-
- Expr *Cond = Importer.Import(E->getCond());
- if (!Cond)
- return nullptr;
-
- auto *OpaqueValue = cast_or_null<OpaqueValueExpr>(
- Importer.Import(E->getOpaqueValue()));
- if (!OpaqueValue)
- return nullptr;
-
- Expr *TrueExpr = Importer.Import(E->getTrueExpr());
- if (!TrueExpr)
- return nullptr;
-
- Expr *FalseExpr = Importer.Import(E->getFalseExpr());
- if (!FalseExpr)
- return nullptr;
+ auto Imp = importSeq(
+ E->getCommon(), E->getOpaqueValue(), E->getCond(), E->getTrueExpr(),
+ E->getFalseExpr(), E->getQuestionLoc(), E->getColonLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ Expr *ToCommon, *ToCond, *ToTrueExpr, *ToFalseExpr;
+ OpaqueValueExpr *ToOpaqueValue;
+ SourceLocation ToQuestionLoc, ToColonLoc;
+ QualType ToType;
+ std::tie(
+ ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr, ToQuestionLoc,
+ ToColonLoc, ToType) = *Imp;
return new (Importer.getToContext()) BinaryConditionalOperator(
- Common, OpaqueValue, Cond, TrueExpr, FalseExpr,
- Importer.Import(E->getQuestionLoc()), Importer.Import(E->getColonLoc()),
- T, E->getValueKind(), E->getObjectKind());
-}
-
-Expr *ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *ToQueried = Importer.Import(E->getQueriedTypeSourceInfo());
- if (!ToQueried)
- return nullptr;
-
- Expr *Dim = Importer.Import(E->getDimensionExpression());
- if (!Dim && E->getDimensionExpression())
- return nullptr;
+ ToCommon, ToOpaqueValue, ToCond, ToTrueExpr, ToFalseExpr,
+ ToQuestionLoc, ToColonLoc, ToType, E->getValueKind(),
+ E->getObjectKind());
+}
+
+ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getQueriedTypeSourceInfo(),
+ E->getDimensionExpression(), E->getEndLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToBeginLoc, ToEndLoc;
+ TypeSourceInfo *ToQueriedTypeSourceInfo;
+ Expr *ToDimensionExpression;
+ QualType ToType;
+ std::tie(
+ ToBeginLoc, ToQueriedTypeSourceInfo, ToDimensionExpression, ToEndLoc,
+ ToType) = *Imp;
return new (Importer.getToContext()) ArrayTypeTraitExpr(
- Importer.Import(E->getLocStart()), E->getTrait(), ToQueried,
- E->getValue(), Dim, Importer.Import(E->getLocEnd()), T);
+ ToBeginLoc, E->getTrait(), ToQueriedTypeSourceInfo, E->getValue(),
+ ToDimensionExpression, ToEndLoc, ToType);
}
-Expr *ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getBeginLoc(), E->getQueriedExpression(), E->getEndLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToQueried = Importer.Import(E->getQueriedExpression());
- if (!ToQueried)
- return nullptr;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ Expr *ToQueriedExpression;
+ QualType ToType;
+ std::tie(ToBeginLoc, ToQueriedExpression, ToEndLoc, ToType) = *Imp;
return new (Importer.getToContext()) ExpressionTraitExpr(
- Importer.Import(E->getLocStart()), E->getTrait(), ToQueried,
- E->getValue(), Importer.Import(E->getLocEnd()), T);
+ ToBeginLoc, E->getTrait(), ToQueriedExpression, E->getValue(),
+ ToEndLoc, ToType);
}
-Expr *ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ auto Imp = importSeq(
+ E->getLocation(), E->getType(), E->getSourceExpr());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SourceExpr = Importer.Import(E->getSourceExpr());
- if (!SourceExpr && E->getSourceExpr())
- return nullptr;
+ SourceLocation ToLocation;
+ QualType ToType;
+ Expr *ToSourceExpr;
+ std::tie(ToLocation, ToType, ToSourceExpr) = *Imp;
return new (Importer.getToContext()) OpaqueValueExpr(
- Importer.Import(E->getLocation()), T, E->getValueKind(),
- E->getObjectKind(), SourceExpr);
+ ToLocation, ToType, E->getValueKind(), E->getObjectKind(), ToSourceExpr);
}
-Expr *ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *ToLHS = Importer.Import(E->getLHS());
- if (!ToLHS)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getRBracketLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToRHS = Importer.Import(E->getRHS());
- if (!ToRHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ SourceLocation ToRBracketLoc;
+ QualType ToType;
+ std::tie(ToLHS, ToRHS, ToType, ToRBracketLoc) = *Imp;
return new (Importer.getToContext()) ArraySubscriptExpr(
- ToLHS, ToRHS, T, E->getValueKind(), E->getObjectKind(),
- Importer.Import(E->getRBracketLoc()));
+ ToLHS, ToRHS, ToType, E->getValueKind(), E->getObjectKind(),
+ ToRBracketLoc);
}
-Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- QualType CompLHSType = Importer.Import(E->getComputationLHSType());
- if (CompLHSType.isNull())
- return nullptr;
-
- QualType CompResultType = Importer.Import(E->getComputationResultType());
- if (CompResultType.isNull())
- return nullptr;
-
- Expr *LHS = Importer.Import(E->getLHS());
- if (!LHS)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ auto Imp = importSeq(
+ E->getLHS(), E->getRHS(), E->getType(), E->getComputationLHSType(),
+ E->getComputationResultType(), E->getOperatorLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *RHS = Importer.Import(E->getRHS());
- if (!RHS)
- return nullptr;
+ Expr *ToLHS, *ToRHS;
+ QualType ToType, ToComputationLHSType, ToComputationResultType;
+ SourceLocation ToOperatorLoc;
+ std::tie(ToLHS, ToRHS, ToType, ToComputationLHSType, ToComputationResultType,
+ ToOperatorLoc) = *Imp;
- return new (Importer.getToContext())
- CompoundAssignOperator(LHS, RHS, E->getOpcode(),
- T, E->getValueKind(),
- E->getObjectKind(),
- CompLHSType, CompResultType,
- Importer.Import(E->getOperatorLoc()),
- E->getFPFeatures());
+ return new (Importer.getToContext()) CompoundAssignOperator(
+ ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(),
+ E->getObjectKind(), ToComputationLHSType, ToComputationResultType,
+ ToOperatorLoc, E->getFPFeatures());
}
-bool ASTNodeImporter::ImportCastPath(CastExpr *CE, CXXCastPath &Path) {
+Expected<CXXCastPath>
+ASTNodeImporter::ImportCastPath(CastExpr *CE) {
+ CXXCastPath Path;
for (auto I = CE->path_begin(), E = CE->path_end(); I != E; ++I) {
- if (CXXBaseSpecifier *Spec = Importer.Import(*I))
- Path.push_back(Spec);
+ if (auto SpecOrErr = import(*I))
+ Path.push_back(*SpecOrErr);
else
- return true;
+ return SpecOrErr.takeError();
}
- return false;
+ return Path;
}
-Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
+ Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
- return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
- SubExpr, &BasePath, E->getValueKind());
+ return ImplicitCastExpr::Create(
+ Importer.getToContext(), *ToTypeOrErr, E->getCastKind(), *ToSubExprOrErr,
+ &(*ToBasePathOrErr), E->getValueKind());
}
-Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ auto Imp1 = importSeq(
+ E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten());
+ if (!Imp1)
+ return Imp1.takeError();
- TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
- if (!TInfo && E->getTypeInfoAsWritten())
- return nullptr;
+ QualType ToType;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToTypeInfoAsWritten;
+ std::tie(ToType, ToSubExpr, ToTypeInfoAsWritten) = *Imp1;
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
+ Expected<CXXCastPath> ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
+ CXXCastPath *ToBasePath = &(*ToBasePathOrErr);
switch (E->getStmtClass()) {
case Stmt::CStyleCastExprClass: {
auto *CCE = cast<CStyleCastExpr>(E);
- return CStyleCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- Importer.Import(CCE->getLParenLoc()),
- Importer.Import(CCE->getRParenLoc()));
+ ExpectedSLoc ToLParenLocOrErr = import(CCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToRParenLocOrErr = import(CCE->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+ return CStyleCastExpr::Create(
+ Importer.getToContext(), ToType, E->getValueKind(), E->getCastKind(),
+ ToSubExpr, ToBasePath, ToTypeInfoAsWritten, *ToLParenLocOrErr,
+ *ToRParenLocOrErr);
}
case Stmt::CXXFunctionalCastExprClass: {
auto *FCE = cast<CXXFunctionalCastExpr>(E);
- return CXXFunctionalCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), TInfo,
- E->getCastKind(), SubExpr, &BasePath,
- Importer.Import(FCE->getLParenLoc()),
- Importer.Import(FCE->getRParenLoc()));
+ ExpectedSLoc ToLParenLocOrErr = import(FCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToRParenLocOrErr = import(FCE->getRParenLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+ return CXXFunctionalCastExpr::Create(
+ Importer.getToContext(), ToType, E->getValueKind(), ToTypeInfoAsWritten,
+ E->getCastKind(), ToSubExpr, ToBasePath, *ToLParenLocOrErr,
+ *ToRParenLocOrErr);
}
case Stmt::ObjCBridgedCastExprClass: {
- auto *OCE = cast<ObjCBridgedCastExpr>(E);
- return new (Importer.getToContext()) ObjCBridgedCastExpr(
- Importer.Import(OCE->getLParenLoc()), OCE->getBridgeKind(),
- E->getCastKind(), Importer.Import(OCE->getBridgeKeywordLoc()),
- TInfo, SubExpr);
+ auto *OCE = cast<ObjCBridgedCastExpr>(E);
+ ExpectedSLoc ToLParenLocOrErr = import(OCE->getLParenLoc());
+ if (!ToLParenLocOrErr)
+ return ToLParenLocOrErr.takeError();
+ ExpectedSLoc ToBridgeKeywordLocOrErr = import(OCE->getBridgeKeywordLoc());
+ if (!ToBridgeKeywordLocOrErr)
+ return ToBridgeKeywordLocOrErr.takeError();
+ return new (Importer.getToContext()) ObjCBridgedCastExpr(
+ *ToLParenLocOrErr, OCE->getBridgeKind(), E->getCastKind(),
+ *ToBridgeKeywordLocOrErr, ToTypeInfoAsWritten, ToSubExpr);
}
default:
- break; // just fall through
- }
-
- auto *Named = cast<CXXNamedCastExpr>(E);
- SourceLocation ExprLoc = Importer.Import(Named->getOperatorLoc()),
- RParenLoc = Importer.Import(Named->getRParenLoc());
- SourceRange Brackets = Importer.Import(Named->getAngleBrackets());
-
- switch (E->getStmtClass()) {
- case Stmt::CXXStaticCastExprClass:
- return CXXStaticCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- ExprLoc, RParenLoc, Brackets);
-
- case Stmt::CXXDynamicCastExprClass:
- return CXXDynamicCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- ExprLoc, RParenLoc, Brackets);
-
- case Stmt::CXXReinterpretCastExprClass:
- return CXXReinterpretCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- ExprLoc, RParenLoc, Brackets);
-
- case Stmt::CXXConstCastExprClass:
- return CXXConstCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), SubExpr, TInfo, ExprLoc,
- RParenLoc, Brackets);
- default:
llvm_unreachable("Cast expression of unsupported type!");
- return nullptr;
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
}
-Expr *ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *OE) {
- QualType T = Importer.Import(OE->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ SmallVector<OffsetOfNode, 4> ToNodes;
+ for (int I = 0, N = E->getNumComponents(); I < N; ++I) {
+ const OffsetOfNode &FromNode = E->getComponent(I);
- SmallVector<OffsetOfNode, 4> Nodes;
- for (int I = 0, E = OE->getNumComponents(); I < E; ++I) {
- const OffsetOfNode &Node = OE->getComponent(I);
+ SourceLocation ToBeginLoc, ToEndLoc;
+ if (FromNode.getKind() != OffsetOfNode::Base) {
+ auto Imp = importSeq(FromNode.getBeginLoc(), FromNode.getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+ std::tie(ToBeginLoc, ToEndLoc) = *Imp;
+ }
- switch (Node.getKind()) {
+ switch (FromNode.getKind()) {
case OffsetOfNode::Array:
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()),
- Node.getArrayExprIndex(),
- Importer.Import(Node.getLocEnd())));
+ ToNodes.push_back(
+ OffsetOfNode(ToBeginLoc, FromNode.getArrayExprIndex(), ToEndLoc));
break;
-
case OffsetOfNode::Base: {
- CXXBaseSpecifier *BS = Importer.Import(Node.getBase());
- if (!BS && Node.getBase())
- return nullptr;
- Nodes.push_back(OffsetOfNode(BS));
+ auto ToBSOrErr = import(FromNode.getBase());
+ if (!ToBSOrErr)
+ return ToBSOrErr.takeError();
+ ToNodes.push_back(OffsetOfNode(*ToBSOrErr));
break;
}
case OffsetOfNode::Field: {
- auto *FD = cast_or_null<FieldDecl>(Importer.Import(Node.getField()));
- if (!FD)
- return nullptr;
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()), FD,
- Importer.Import(Node.getLocEnd())));
+ auto ToFieldOrErr = import(FromNode.getField());
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
+ ToNodes.push_back(OffsetOfNode(ToBeginLoc, *ToFieldOrErr, ToEndLoc));
break;
}
case OffsetOfNode::Identifier: {
- IdentifierInfo *ToII = Importer.Import(Node.getFieldName());
- if (!ToII)
- return nullptr;
- Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()), ToII,
- Importer.Import(Node.getLocEnd())));
+ IdentifierInfo *ToII = Importer.Import(FromNode.getFieldName());
+ ToNodes.push_back(OffsetOfNode(ToBeginLoc, ToII, ToEndLoc));
break;
}
}
}
- SmallVector<Expr *, 4> Exprs(OE->getNumExpressions());
- for (int I = 0, E = OE->getNumExpressions(); I < E; ++I) {
- Expr *ToIndexExpr = Importer.Import(OE->getIndexExpr(I));
- if (!ToIndexExpr)
- return nullptr;
- Exprs[I] = ToIndexExpr;
+ SmallVector<Expr *, 4> ToExprs(E->getNumExpressions());
+ for (int I = 0, N = E->getNumExpressions(); I < N; ++I) {
+ ExpectedExpr ToIndexExprOrErr = import(E->getIndexExpr(I));
+ if (!ToIndexExprOrErr)
+ return ToIndexExprOrErr.takeError();
+ ToExprs[I] = *ToIndexExprOrErr;
}
- TypeSourceInfo *TInfo = Importer.Import(OE->getTypeSourceInfo());
- if (!TInfo && OE->getTypeSourceInfo())
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getTypeSourceInfo(), E->getOperatorLoc(),
+ E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ std::tie(ToType, ToTypeSourceInfo, ToOperatorLoc, ToRParenLoc) = *Imp;
- return OffsetOfExpr::Create(Importer.getToContext(), T,
- Importer.Import(OE->getOperatorLoc()),
- TInfo, Nodes, Exprs,
- Importer.Import(OE->getRParenLoc()));
+ return OffsetOfExpr::Create(
+ Importer.getToContext(), ToType, ToOperatorLoc, ToTypeSourceInfo, ToNodes,
+ ToExprs, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getOperand(), E->getBeginLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Operand = Importer.Import(E->getOperand());
- if (!Operand)
- return nullptr;
+ QualType ToType;
+ Expr *ToOperand;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToType, ToOperand, ToBeginLoc, ToEndLoc) = *Imp;
- CanThrowResult CanThrow;
+ CanThrowResult ToCanThrow;
if (E->isValueDependent())
- CanThrow = CT_Dependent;
+ ToCanThrow = CT_Dependent;
else
- CanThrow = E->getValue() ? CT_Can : CT_Cannot;
+ ToCanThrow = E->getValue() ? CT_Can : CT_Cannot;
return new (Importer.getToContext()) CXXNoexceptExpr(
- T, Operand, CanThrow,
- Importer.Import(E->getLocStart()), Importer.Import(E->getLocEnd()));
+ ToType, ToOperand, ToCanThrow, ToBeginLoc, ToEndLoc);
}
-Expr *ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ auto Imp = importSeq(E->getSubExpr(), E->getType(), E->getThrowLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr && E->getSubExpr())
- return nullptr;
+ Expr *ToSubExpr;
+ QualType ToType;
+ SourceLocation ToThrowLoc;
+ std::tie(ToSubExpr, ToType, ToThrowLoc) = *Imp;
return new (Importer.getToContext()) CXXThrowExpr(
- SubExpr, T, Importer.Import(E->getThrowLoc()),
- E->isThrownVariableInScope());
+ ToSubExpr, ToType, ToThrowLoc, E->isThrownVariableInScope());
}
-Expr *ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- auto *Param = cast_or_null<ParmVarDecl>(Importer.Import(E->getParam()));
- if (!Param)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ ExpectedSLoc ToUsedLocOrErr = import(E->getUsedLocation());
+ if (!ToUsedLocOrErr)
+ return ToUsedLocOrErr.takeError();
+
+ auto ToParamOrErr = import(E->getParam());
+ if (!ToParamOrErr)
+ return ToParamOrErr.takeError();
return CXXDefaultArgExpr::Create(
- Importer.getToContext(), Importer.Import(E->getUsedLocation()), Param);
+ Importer.getToContext(), *ToUsedLocOrErr, *ToParamOrErr);
}
-Expr *ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getTypeSourceInfo(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *TypeInfo = Importer.Import(E->getTypeSourceInfo());
- if (!TypeInfo)
- return nullptr;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceLocation ToRParenLoc;
+ std::tie(ToType, ToTypeSourceInfo, ToRParenLoc) = *Imp;
return new (Importer.getToContext()) CXXScalarValueInitExpr(
- T, TypeInfo, Importer.Import(E->getRParenLoc()));
+ ToType, ToTypeSourceInfo, ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- Expr *SubExpr = Importer.Import(E->getSubExpr());
- if (!SubExpr)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- auto *Dtor = cast_or_null<CXXDestructorDecl>(
- Importer.Import(const_cast<CXXDestructorDecl *>(
- E->getTemporary()->getDestructor())));
- if (!Dtor)
- return nullptr;
+ auto ToDtorOrErr = import(E->getTemporary()->getDestructor());
+ if (!ToDtorOrErr)
+ return ToDtorOrErr.takeError();
ASTContext &ToCtx = Importer.getToContext();
- CXXTemporary *Temp = CXXTemporary::Create(ToCtx, Dtor);
- return CXXBindTemporaryExpr::Create(ToCtx, Temp, SubExpr);
+ CXXTemporary *Temp = CXXTemporary::Create(ToCtx, *ToDtorOrErr);
+ return CXXBindTemporaryExpr::Create(ToCtx, Temp, *ToSubExprOrErr);
}
-Expr *ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE) {
- QualType T = Importer.Import(CE->getType());
- if (T.isNull())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(CE->getTypeSourceInfo());
- if (!TInfo)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ auto Imp = importSeq(
+ E->getConstructor(), E->getType(), E->getTypeSourceInfo(),
+ E->getParenOrBraceRange());
+ if (!Imp)
+ return Imp.takeError();
- SmallVector<Expr *, 8> Args(CE->getNumArgs());
- if (ImportContainerChecked(CE->arguments(), Args))
- return nullptr;
+ CXXConstructorDecl *ToConstructor;
+ QualType ToType;
+ TypeSourceInfo *ToTypeSourceInfo;
+ SourceRange ToParenOrBraceRange;
+ std::tie(ToConstructor, ToType, ToTypeSourceInfo, ToParenOrBraceRange) = *Imp;
- auto *Ctor = cast_or_null<CXXConstructorDecl>(
- Importer.Import(CE->getConstructor()));
- if (!Ctor)
- return nullptr;
+ SmallVector<Expr *, 8> ToArgs(E->getNumArgs());
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
- return new (Importer.getToContext()) CXXTemporaryObjectExpr(
- Importer.getToContext(), Ctor, T, TInfo, Args,
- Importer.Import(CE->getParenOrBraceRange()), CE->hadMultipleCandidates(),
- CE->isListInitialization(), CE->isStdInitListInitialization(),
- CE->requiresZeroInitialization());
+ return CXXTemporaryObjectExpr::Create(
+ Importer.getToContext(), ToConstructor, ToType, ToTypeSourceInfo, ToArgs,
+ ToParenOrBraceRange, E->hadMultipleCandidates(),
+ E->isListInitialization(), E->isStdInitListInitialization(),
+ E->requiresZeroInitialization());
}
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->GetTemporaryExpr(), E->getExtendingDecl());
+ if (!Imp)
+ return Imp.takeError();
- Expr *TempE = Importer.Import(E->GetTemporaryExpr());
- if (!TempE)
- return nullptr;
-
- auto *ExtendedBy = cast_or_null<ValueDecl>(
- Importer.Import(const_cast<ValueDecl *>(E->getExtendingDecl())));
- if (!ExtendedBy && E->getExtendingDecl())
- return nullptr;
+ QualType ToType;
+ Expr *ToTemporaryExpr;
+ const ValueDecl *ToExtendingDecl;
+ std::tie(ToType, ToTemporaryExpr, ToExtendingDecl) = *Imp;
auto *ToMTE = new (Importer.getToContext()) MaterializeTemporaryExpr(
- T, TempE, E->isBoundToLvalueReference());
+ ToType, ToTemporaryExpr, E->isBoundToLvalueReference());
// FIXME: Should ManglingNumber get numbers associated with 'to' context?
- ToMTE->setExtendingDecl(ExtendedBy, E->getManglingNumber());
+ ToMTE->setExtendingDecl(ToExtendingDecl, E->getManglingNumber());
return ToMTE;
}
-Expr *ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getPattern(), E->getEllipsisLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *Pattern = Importer.Import(E->getPattern());
- if (!Pattern)
- return nullptr;
+ QualType ToType;
+ Expr *ToPattern;
+ SourceLocation ToEllipsisLoc;
+ std::tie(ToType, ToPattern, ToEllipsisLoc) = *Imp;
return new (Importer.getToContext()) PackExpansionExpr(
- T, Pattern, Importer.Import(E->getEllipsisLoc()),
- E->getNumExpansions());
+ ToType, ToPattern, ToEllipsisLoc, E->getNumExpansions());
}
-Expr *ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
- auto *Pack = cast_or_null<NamedDecl>(Importer.Import(E->getPack()));
- if (!Pack)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ auto Imp = importSeq(
+ E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Optional<unsigned> Length;
+ SourceLocation ToOperatorLoc, ToPackLoc, ToRParenLoc;
+ NamedDecl *ToPack;
+ std::tie(ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc) = *Imp;
+ Optional<unsigned> Length;
if (!E->isValueDependent())
Length = E->getPackLength();
- SmallVector<TemplateArgument, 8> PartialArguments;
+ SmallVector<TemplateArgument, 8> ToPartialArguments;
if (E->isPartiallySubstituted()) {
- if (ImportTemplateArguments(E->getPartialArguments().data(),
- E->getPartialArguments().size(),
- PartialArguments))
- return nullptr;
+ if (Error Err = ImportTemplateArguments(
+ E->getPartialArguments().data(),
+ E->getPartialArguments().size(),
+ ToPartialArguments))
+ return std::move(Err);
}
return SizeOfPackExpr::Create(
- Importer.getToContext(), Importer.Import(E->getOperatorLoc()), Pack,
- Importer.Import(E->getPackLoc()), Importer.Import(E->getRParenLoc()),
- Length, PartialArguments);
-}
-
-Expr *ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *CE) {
- QualType T = Importer.Import(CE->getType());
- if (T.isNull())
- return nullptr;
-
- SmallVector<Expr *, 4> PlacementArgs(CE->getNumPlacementArgs());
- if (ImportContainerChecked(CE->placement_arguments(), PlacementArgs))
- return nullptr;
-
- auto *OperatorNewDecl = cast_or_null<FunctionDecl>(
- Importer.Import(CE->getOperatorNew()));
- if (!OperatorNewDecl && CE->getOperatorNew())
- return nullptr;
-
- auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
- Importer.Import(CE->getOperatorDelete()));
- if (!OperatorDeleteDecl && CE->getOperatorDelete())
- return nullptr;
-
- Expr *ToInit = Importer.Import(CE->getInitializer());
- if (!ToInit && CE->getInitializer())
- return nullptr;
-
- TypeSourceInfo *TInfo = Importer.Import(CE->getAllocatedTypeSourceInfo());
- if (!TInfo)
- return nullptr;
-
- Expr *ToArrSize = Importer.Import(CE->getArraySize());
- if (!ToArrSize && CE->getArraySize())
- return nullptr;
-
- return new (Importer.getToContext()) CXXNewExpr(
- Importer.getToContext(),
- CE->isGlobalNew(),
- OperatorNewDecl, OperatorDeleteDecl,
- CE->passAlignment(),
- CE->doesUsualArrayDeleteWantSize(),
- PlacementArgs,
- Importer.Import(CE->getTypeIdParens()),
- ToArrSize, CE->getInitializationStyle(), ToInit, T, TInfo,
- Importer.Import(CE->getSourceRange()),
- Importer.Import(CE->getDirectInitRange()));
-}
-
-Expr *ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
-
- auto *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
- Importer.Import(E->getOperatorDelete()));
- if (!OperatorDeleteDecl && E->getOperatorDelete())
- return nullptr;
-
- Expr *ToArg = Importer.Import(E->getArgument());
- if (!ToArg && E->getArgument())
- return nullptr;
+ Importer.getToContext(), ToOperatorLoc, ToPack, ToPackLoc, ToRParenLoc,
+ Length, ToPartialArguments);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *E) {
+ auto Imp = importSeq(
+ E->getOperatorNew(), E->getOperatorDelete(), E->getTypeIdParens(),
+ E->getArraySize(), E->getInitializer(), E->getType(),
+ E->getAllocatedTypeSourceInfo(), E->getSourceRange(),
+ E->getDirectInitRange());
+ if (!Imp)
+ return Imp.takeError();
+
+ FunctionDecl *ToOperatorNew, *ToOperatorDelete;
+ SourceRange ToTypeIdParens, ToSourceRange, ToDirectInitRange;
+ Expr *ToArraySize, *ToInitializer;
+ QualType ToType;
+ TypeSourceInfo *ToAllocatedTypeSourceInfo;
+ std::tie(
+ ToOperatorNew, ToOperatorDelete, ToTypeIdParens, ToArraySize, ToInitializer,
+ ToType, ToAllocatedTypeSourceInfo, ToSourceRange, ToDirectInitRange) = *Imp;
+
+ SmallVector<Expr *, 4> ToPlacementArgs(E->getNumPlacementArgs());
+ if (Error Err =
+ ImportContainerChecked(E->placement_arguments(), ToPlacementArgs))
+ return std::move(Err);
+
+ return CXXNewExpr::Create(
+ Importer.getToContext(), E->isGlobalNew(), ToOperatorNew,
+ ToOperatorDelete, E->passAlignment(), E->doesUsualArrayDeleteWantSize(),
+ ToPlacementArgs, ToTypeIdParens, ToArraySize, E->getInitializationStyle(),
+ ToInitializer, ToType, ToAllocatedTypeSourceInfo, ToSourceRange,
+ ToDirectInitRange);
+}
+
+ExpectedStmt ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getOperatorDelete(), E->getArgument(), E->getBeginLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ FunctionDecl *ToOperatorDelete;
+ Expr *ToArgument;
+ SourceLocation ToBeginLoc;
+ std::tie(ToType, ToOperatorDelete, ToArgument, ToBeginLoc) = *Imp;
return new (Importer.getToContext()) CXXDeleteExpr(
- T, E->isGlobalDelete(),
- E->isArrayForm(),
- E->isArrayFormAsWritten(),
- E->doesUsualArrayDeleteWantSize(),
- OperatorDeleteDecl,
- ToArg,
- Importer.Import(E->getLocStart()));
+ ToType, E->isGlobalDelete(), E->isArrayForm(), E->isArrayFormAsWritten(),
+ E->doesUsualArrayDeleteWantSize(), ToOperatorDelete, ToArgument,
+ ToBeginLoc);
}
-Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getLocation(), E->getConstructor(),
+ E->getParenOrBraceRange());
+ if (!Imp)
+ return Imp.takeError();
- auto *ToCCD =
- dyn_cast_or_null<CXXConstructorDecl>(Importer.Import(E->getConstructor()));
- if (!ToCCD)
- return nullptr;
+ QualType ToType;
+ SourceLocation ToLocation;
+ CXXConstructorDecl *ToConstructor;
+ SourceRange ToParenOrBraceRange;
+ std::tie(ToType, ToLocation, ToConstructor, ToParenOrBraceRange) = *Imp;
SmallVector<Expr *, 6> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
- return CXXConstructExpr::Create(Importer.getToContext(), T,
- Importer.Import(E->getLocation()),
- ToCCD, E->isElidable(),
- ToArgs, E->hadMultipleCandidates(),
- E->isListInitialization(),
- E->isStdInitListInitialization(),
- E->requiresZeroInitialization(),
- E->getConstructionKind(),
- Importer.Import(E->getParenOrBraceRange()));
+ return CXXConstructExpr::Create(
+ Importer.getToContext(), ToType, ToLocation, ToConstructor,
+ E->isElidable(), ToArgs, E->hadMultipleCandidates(),
+ E->isListInitialization(), E->isStdInitListInitialization(),
+ E->requiresZeroInitialization(), E->getConstructionKind(),
+ ToParenOrBraceRange);
}
-Expr *ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *EWC) {
- Expr *SubExpr = Importer.Import(EWC->getSubExpr());
- if (!SubExpr && EWC->getSubExpr())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- SmallVector<ExprWithCleanups::CleanupObject, 8> Objs(EWC->getNumObjects());
- for (unsigned I = 0, E = EWC->getNumObjects(); I < E; I++)
- if (ExprWithCleanups::CleanupObject Obj =
- cast_or_null<BlockDecl>(Importer.Import(EWC->getObject(I))))
- Objs[I] = Obj;
- else
- return nullptr;
+ SmallVector<ExprWithCleanups::CleanupObject, 8> ToObjects(E->getNumObjects());
+ if (Error Err = ImportContainerChecked(E->getObjects(), ToObjects))
+ return std::move(Err);
- return ExprWithCleanups::Create(Importer.getToContext(),
- SubExpr, EWC->cleanupsHaveSideEffects(),
- Objs);
+ return ExprWithCleanups::Create(
+ Importer.getToContext(), *ToSubExprOrErr, E->cleanupsHaveSideEffects(),
+ ToObjects);
}
-Expr *ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ auto Imp = importSeq(
+ E->getCallee(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToFn = Importer.Import(E->getCallee());
- if (!ToFn)
- return nullptr;
+ Expr *ToCallee;
+ QualType ToType;
+ SourceLocation ToRParenLoc;
+ std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
SmallVector<Expr *, 4> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
- return new (Importer.getToContext()) CXXMemberCallExpr(
- Importer.getToContext(), ToFn, ToArgs, T, E->getValueKind(),
- Importer.Import(E->getRParenLoc()));
+ return CXXMemberCallExpr::Create(Importer.getToContext(), ToCallee, ToArgs,
+ ToType, E->getValueKind(), ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- return new (Importer.getToContext())
- CXXThisExpr(Importer.Import(E->getLocation()), T, E->isImplicit());
-}
-
-Expr *ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
- return new (Importer.getToContext())
- CXXBoolLiteralExpr(E->getValue(), T, Importer.Import(E->getLocation()));
+ return new (Importer.getToContext()) CXXThisExpr(
+ *ToLocationOrErr, *ToTypeOrErr, E->isImplicit());
}
+ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
-Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ ExpectedSLoc ToLocationOrErr = import(E->getLocation());
+ if (!ToLocationOrErr)
+ return ToLocationOrErr.takeError();
- Expr *ToBase = Importer.Import(E->getBase());
- if (!ToBase && E->getBase())
- return nullptr;
+ return new (Importer.getToContext()) CXXBoolLiteralExpr(
+ E->getValue(), *ToTypeOrErr, *ToLocationOrErr);
+}
- auto *ToMember = dyn_cast<ValueDecl>(Importer.Import(E->getMemberDecl()));
- if (!ToMember && E->getMemberDecl())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
+ auto Imp1 = importSeq(
+ E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc(), E->getMemberDecl(), E->getType());
+ if (!Imp1)
+ return Imp1.takeError();
- auto *ToDecl =
- dyn_cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl()));
- if (!ToDecl && E->getFoundDecl().getDecl())
- return nullptr;
+ Expr *ToBase;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ ValueDecl *ToMemberDecl;
+ QualType ToType;
+ std::tie(
+ ToBase, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl,
+ ToType) = *Imp1;
+
+ auto Imp2 = importSeq(
+ E->getFoundDecl().getDecl(), E->getMemberNameInfo().getName(),
+ E->getMemberNameInfo().getLoc(), E->getLAngleLoc(), E->getRAngleLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+ NamedDecl *ToDecl;
+ DeclarationName ToName;
+ SourceLocation ToLoc, ToLAngleLoc, ToRAngleLoc;
+ std::tie(ToDecl, ToName, ToLoc, ToLAngleLoc, ToRAngleLoc) = *Imp2;
DeclAccessPair ToFoundDecl =
DeclAccessPair::make(ToDecl, E->getFoundDecl().getAccess());
- DeclarationNameInfo ToMemberNameInfo(
- Importer.Import(E->getMemberNameInfo().getName()),
- Importer.Import(E->getMemberNameInfo().getLoc()));
+ DeclarationNameInfo ToMemberNameInfo(ToName, ToLoc);
if (E->hasExplicitTemplateArgs()) {
- return nullptr; // FIXME: handle template arguments
+ // FIXME: handle template arguments
+ return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
- return MemberExpr::Create(Importer.getToContext(), ToBase,
- E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- ToMember, ToFoundDecl, ToMemberNameInfo,
- nullptr, T, E->getValueKind(),
- E->getObjectKind());
+ return MemberExpr::Create(
+ Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToTemplateKeywordLoc, ToMemberDecl, ToFoundDecl,
+ ToMemberNameInfo, nullptr, ToType, E->getValueKind(), E->getObjectKind());
}
-Expr *ASTNodeImporter::VisitCXXPseudoDestructorExpr(
- CXXPseudoDestructorExpr *E) {
- Expr *BaseE = Importer.Import(E->getBase());
- if (!BaseE)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ auto Imp = importSeq(
+ E->getBase(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getScopeTypeInfo(), E->getColonColonLoc(), E->getTildeLoc());
+ if (!Imp)
+ return Imp.takeError();
- TypeSourceInfo *ScopeInfo = Importer.Import(E->getScopeTypeInfo());
- if (!ScopeInfo && E->getScopeTypeInfo())
- return nullptr;
+ Expr *ToBase;
+ SourceLocation ToOperatorLoc, ToColonColonLoc, ToTildeLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ TypeSourceInfo *ToScopeTypeInfo;
+ std::tie(
+ ToBase, ToOperatorLoc, ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc,
+ ToTildeLoc) = *Imp;
PseudoDestructorTypeStorage Storage;
if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) {
IdentifierInfo *ToII = Importer.Import(FromII);
- if (!ToII)
- return nullptr;
- Storage = PseudoDestructorTypeStorage(
- ToII, Importer.Import(E->getDestroyedTypeLoc()));
+ ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc());
+ if (!ToDestroyedTypeLocOrErr)
+ return ToDestroyedTypeLocOrErr.takeError();
+ Storage = PseudoDestructorTypeStorage(ToII, *ToDestroyedTypeLocOrErr);
} else {
- TypeSourceInfo *TI = Importer.Import(E->getDestroyedTypeInfo());
- if (!TI)
- return nullptr;
- Storage = PseudoDestructorTypeStorage(TI);
+ if (auto ToTIOrErr = import(E->getDestroyedTypeInfo()))
+ Storage = PseudoDestructorTypeStorage(*ToTIOrErr);
+ else
+ return ToTIOrErr.takeError();
}
return new (Importer.getToContext()) CXXPseudoDestructorExpr(
- Importer.getToContext(), BaseE, E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- ScopeInfo, Importer.Import(E->getColonColonLoc()),
- Importer.Import(E->getTildeLoc()), Storage);
+ Importer.getToContext(), ToBase, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToScopeTypeInfo, ToColonColonLoc, ToTildeLoc, Storage);
}
-Expr *ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *E) {
- Expr *Base = nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc(), E->getFirstQualifierFoundInScope());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ NamedDecl *ToFirstQualifierFoundInScope;
+ std::tie(
+ ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
+ ToFirstQualifierFoundInScope) = *Imp;
+
+ Expr *ToBase = nullptr;
if (!E->isImplicitAccess()) {
- Base = Importer.Import(E->getBase());
- if (!Base)
- return nullptr;
+ if (ExpectedExpr ToBaseOrErr = import(E->getBase()))
+ ToBase = *ToBaseOrErr;
+ else
+ return ToBaseOrErr.takeError();
}
- QualType BaseType = Importer.Import(E->getBaseType());
- if (BaseType.isNull())
- return nullptr;
-
TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
- E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
+ ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
- DeclarationName Name = Importer.Import(E->getMember());
- if (!E->getMember().isEmpty() && Name.isEmpty())
- return nullptr;
-
- DeclarationNameInfo MemberNameInfo(Name, Importer.Import(E->getMemberLoc()));
+ auto ToMemberNameInfoOrErr = importSeq(E->getMember(), E->getMemberLoc());
+ if (!ToMemberNameInfoOrErr)
+ return ToMemberNameInfoOrErr.takeError();
+ DeclarationNameInfo ToMemberNameInfo(
+ std::get<0>(*ToMemberNameInfoOrErr), std::get<1>(*ToMemberNameInfoOrErr));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getMemberNameInfo(), MemberNameInfo);
- auto ToFQ = Importer.Import(E->getFirstQualifierFoundInScope());
- if (!ToFQ && E->getFirstQualifierFoundInScope())
- return nullptr;
+ if (Error Err = ImportDeclarationNameLoc(
+ E->getMemberNameInfo(), ToMemberNameInfo))
+ return std::move(Err);
return CXXDependentScopeMemberExpr::Create(
- Importer.getToContext(), Base, BaseType, E->isArrow(),
- Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()),
- cast_or_null<NamedDecl>(ToFQ), MemberNameInfo, ResInfo);
+ Importer.getToContext(), ToBase, ToType, E->isArrow(), ToOperatorLoc,
+ ToQualifierLoc, ToTemplateKeywordLoc, ToFirstQualifierFoundInScope,
+ ToMemberNameInfo, ResInfo);
}
-Expr *
+ExpectedStmt
ASTNodeImporter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
- DeclarationName Name = Importer.Import(E->getDeclName());
- if (!E->getDeclName().isEmpty() && Name.isEmpty())
- return nullptr;
-
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getExprLoc()));
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
-
- TemplateArgumentListInfo ToTAInfo(Importer.Import(E->getLAngleLoc()),
- Importer.Import(E->getRAngleLoc()));
+ auto Imp = importSeq(
+ E->getQualifierLoc(), E->getTemplateKeywordLoc(), E->getDeclName(),
+ E->getExprLoc(), E->getLAngleLoc(), E->getRAngleLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ NestedNameSpecifierLoc ToQualifierLoc;
+ SourceLocation ToTemplateKeywordLoc, ToExprLoc, ToLAngleLoc, ToRAngleLoc;
+ DeclarationName ToDeclName;
+ std::tie(
+ ToQualifierLoc, ToTemplateKeywordLoc, ToDeclName, ToExprLoc,
+ ToLAngleLoc, ToRAngleLoc) = *Imp;
+
+ DeclarationNameInfo ToNameInfo(ToDeclName, ToExprLoc);
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
+
+ TemplateArgumentListInfo ToTAInfo(ToLAngleLoc, ToRAngleLoc);
TemplateArgumentListInfo *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
return DependentScopeDeclRefExpr::Create(
- Importer.getToContext(), Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo);
+ Importer.getToContext(), ToQualifierLoc, ToTemplateKeywordLoc,
+ ToNameInfo, ResInfo);
}
-Expr *ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
- CXXUnresolvedConstructExpr *CE) {
- unsigned NumArgs = CE->arg_size();
+ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *E) {
+ auto Imp = importSeq(
+ E->getLParenLoc(), E->getRParenLoc(), E->getTypeSourceInfo());
+ if (!Imp)
+ return Imp.takeError();
- SmallVector<Expr *, 8> ToArgs(NumArgs);
- if (ImportArrayChecked(CE->arg_begin(), CE->arg_end(), ToArgs.begin()))
- return nullptr;
+ SourceLocation ToLParenLoc, ToRParenLoc;
+ TypeSourceInfo *ToTypeSourceInfo;
+ std::tie(ToLParenLoc, ToRParenLoc, ToTypeSourceInfo) = *Imp;
+
+ SmallVector<Expr *, 8> ToArgs(E->arg_size());
+ if (Error Err =
+ ImportArrayChecked(E->arg_begin(), E->arg_end(), ToArgs.begin()))
+ return std::move(Err);
return CXXUnresolvedConstructExpr::Create(
- Importer.getToContext(), Importer.Import(CE->getTypeSourceInfo()),
- Importer.Import(CE->getLParenLoc()), llvm::makeArrayRef(ToArgs),
- Importer.Import(CE->getRParenLoc()));
+ Importer.getToContext(), ToTypeSourceInfo, ToLParenLoc,
+ llvm::makeArrayRef(ToArgs), ToRParenLoc);
}
-Expr *ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
- auto *NamingClass =
- cast_or_null<CXXRecordDecl>(Importer.Import(E->getNamingClass()));
- if (E->getNamingClass() && !NamingClass)
- return nullptr;
+ExpectedStmt
+ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ Expected<CXXRecordDecl *> ToNamingClassOrErr = import(E->getNamingClass());
+ if (!ToNamingClassOrErr)
+ return ToNamingClassOrErr.takeError();
- DeclarationName Name = Importer.Import(E->getName());
- if (E->getName() && !Name)
- return nullptr;
+ auto ToQualifierLocOrErr = import(E->getQualifierLoc());
+ if (!ToQualifierLocOrErr)
+ return ToQualifierLocOrErr.takeError();
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ auto ToNameInfoOrErr = importSeq(E->getName(), E->getNameLoc());
+ if (!ToNameInfoOrErr)
+ return ToNameInfoOrErr.takeError();
+ DeclarationNameInfo ToNameInfo(
+ std::get<0>(*ToNameInfoOrErr), std::get<1>(*ToNameInfoOrErr));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
UnresolvedSet<8> ToDecls;
- for (auto *D : E->decls()) {
- if (auto *To = cast_or_null<NamedDecl>(Importer.Import(D)))
- ToDecls.addDecl(To);
+ for (auto *D : E->decls())
+ if (auto ToDOrErr = import(D))
+ ToDecls.addDecl(cast<NamedDecl>(*ToDOrErr));
else
- return nullptr;
- }
+ return ToDOrErr.takeError();
- TemplateArgumentListInfo ToTAInfo, *ResInfo = nullptr;
- if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->getLAngleLoc(), E->getRAngleLoc(),
- E->template_arguments(), ToTAInfo))
- return nullptr;
- ResInfo = &ToTAInfo;
- }
+ if (E->hasExplicitTemplateArgs() && E->getTemplateKeywordLoc().isValid()) {
+ TemplateArgumentListInfo ToTAInfo;
+ if (Error Err = ImportTemplateArgumentListInfo(
+ E->getLAngleLoc(), E->getRAngleLoc(), E->template_arguments(),
+ ToTAInfo))
+ return std::move(Err);
+
+ ExpectedSLoc ToTemplateKeywordLocOrErr = import(E->getTemplateKeywordLoc());
+ if (!ToTemplateKeywordLocOrErr)
+ return ToTemplateKeywordLocOrErr.takeError();
- if (ResInfo || E->getTemplateKeywordLoc().isValid())
return UnresolvedLookupExpr::Create(
- Importer.getToContext(), NamingClass,
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, E->requiresADL(),
- ResInfo, ToDecls.begin(), ToDecls.end());
+ Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
+ *ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo,
+ ToDecls.begin(), ToDecls.end());
+ }
return UnresolvedLookupExpr::Create(
- Importer.getToContext(), NamingClass,
- Importer.Import(E->getQualifierLoc()), NameInfo, E->requiresADL(),
- E->isOverloaded(), ToDecls.begin(), ToDecls.end());
-}
-
-Expr *ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
- DeclarationName Name = Importer.Import(E->getName());
- if (!E->getName().isEmpty() && Name.isEmpty())
- return nullptr;
- DeclarationNameInfo NameInfo(Name, Importer.Import(E->getNameLoc()));
+ Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
+ ToNameInfo, E->requiresADL(), E->isOverloaded(), ToDecls.begin(),
+ ToDecls.end());
+}
+
+ExpectedStmt
+ASTNodeImporter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ auto Imp1 = importSeq(
+ E->getType(), E->getOperatorLoc(), E->getQualifierLoc(),
+ E->getTemplateKeywordLoc());
+ if (!Imp1)
+ return Imp1.takeError();
+
+ QualType ToType;
+ SourceLocation ToOperatorLoc, ToTemplateKeywordLoc;
+ NestedNameSpecifierLoc ToQualifierLoc;
+ std::tie(ToType, ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc) = *Imp1;
+
+ auto Imp2 = importSeq(E->getName(), E->getNameLoc());
+ if (!Imp2)
+ return Imp2.takeError();
+ DeclarationNameInfo ToNameInfo(std::get<0>(*Imp2), std::get<1>(*Imp2));
// Import additional name location/type info.
- ImportDeclarationNameLoc(E->getNameInfo(), NameInfo);
-
- QualType BaseType = Importer.Import(E->getType());
- if (!E->getType().isNull() && BaseType.isNull())
- return nullptr;
+ if (Error Err = ImportDeclarationNameLoc(E->getNameInfo(), ToNameInfo))
+ return std::move(Err);
UnresolvedSet<8> ToDecls;
- for (Decl *D : E->decls()) {
- if (NamedDecl *To = cast_or_null<NamedDecl>(Importer.Import(D)))
- ToDecls.addDecl(To);
+ for (Decl *D : E->decls())
+ if (auto ToDOrErr = import(D))
+ ToDecls.addDecl(cast<NamedDecl>(*ToDOrErr));
else
- return nullptr;
- }
+ return ToDOrErr.takeError();
TemplateArgumentListInfo ToTAInfo;
TemplateArgumentListInfo *ResInfo = nullptr;
if (E->hasExplicitTemplateArgs()) {
- if (ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
- return nullptr;
+ if (Error Err =
+ ImportTemplateArgumentListInfo(E->template_arguments(), ToTAInfo))
+ return std::move(Err);
ResInfo = &ToTAInfo;
}
- Expr *BaseE = E->isImplicitAccess() ? nullptr : Importer.Import(E->getBase());
- if (!BaseE && !E->isImplicitAccess() && E->getBase()) {
- return nullptr;
+ Expr *ToBase = nullptr;
+ if (!E->isImplicitAccess()) {
+ if (ExpectedExpr ToBaseOrErr = import(E->getBase()))
+ ToBase = *ToBaseOrErr;
+ else
+ return ToBaseOrErr.takeError();
}
return UnresolvedMemberExpr::Create(
- Importer.getToContext(), E->hasUnresolvedUsing(), BaseE, BaseType,
- E->isArrow(), Importer.Import(E->getOperatorLoc()),
- Importer.Import(E->getQualifierLoc()),
- Importer.Import(E->getTemplateKeywordLoc()), NameInfo, ResInfo,
- ToDecls.begin(), ToDecls.end());
+ Importer.getToContext(), E->hasUnresolvedUsing(), ToBase, ToType,
+ E->isArrow(), ToOperatorLoc, ToQualifierLoc, ToTemplateKeywordLoc,
+ ToNameInfo, ResInfo, ToDecls.begin(), ToDecls.end());
}
-Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCallExpr(CallExpr *E) {
+ auto Imp = importSeq(E->getCallee(), E->getType(), E->getRParenLoc());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToCallee = Importer.Import(E->getCallee());
- if (!ToCallee && E->getCallee())
- return nullptr;
+ Expr *ToCallee;
+ QualType ToType;
+ SourceLocation ToRParenLoc;
+ std::tie(ToCallee, ToType, ToRParenLoc) = *Imp;
unsigned NumArgs = E->getNumArgs();
- SmallVector<Expr *, 2> ToArgs(NumArgs);
- if (ImportContainerChecked(E->arguments(), ToArgs))
- return nullptr;
-
- auto **ToArgs_Copied = new (Importer.getToContext()) Expr*[NumArgs];
-
- for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
- ToArgs_Copied[ai] = ToArgs[ai];
+ llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
+ if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
+ return std::move(Err);
if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
- return new (Importer.getToContext()) CXXOperatorCallExpr(
- Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, T,
- OCE->getValueKind(), Importer.Import(OCE->getRParenLoc()),
- OCE->getFPFeatures());
+ return CXXOperatorCallExpr::Create(
+ Importer.getToContext(), OCE->getOperator(), ToCallee, ToArgs, ToType,
+ OCE->getValueKind(), ToRParenLoc, OCE->getFPFeatures(),
+ OCE->getADLCallKind());
}
- return new (Importer.getToContext())
- CallExpr(Importer.getToContext(), ToCallee,
- llvm::makeArrayRef(ToArgs_Copied, NumArgs), T, E->getValueKind(),
- Importer.Import(E->getRParenLoc()));
+ return CallExpr::Create(Importer.getToContext(), ToCallee, ToArgs, ToType,
+ E->getValueKind(), ToRParenLoc, /*MinNumArgs=*/0,
+ E->getADLCallKind());
}
-Optional<LambdaCapture>
-ASTNodeImporter::ImportLambdaCapture(const LambdaCapture &From) {
- VarDecl *Var = nullptr;
- if (From.capturesVariable()) {
- Var = cast_or_null<VarDecl>(Importer.Import(From.getCapturedVar()));
- if (!Var)
- return None;
- }
-
- return LambdaCapture(Importer.Import(From.getLocation()), From.isImplicit(),
- From.getCaptureKind(), Var,
- From.isPackExpansion()
- ? Importer.Import(From.getEllipsisLoc())
- : SourceLocation());
-}
-
-Expr *ASTNodeImporter::VisitLambdaExpr(LambdaExpr *LE) {
- CXXRecordDecl *FromClass = LE->getLambdaClass();
- auto *ToClass = dyn_cast_or_null<CXXRecordDecl>(Importer.Import(FromClass));
- if (!ToClass)
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitLambdaExpr(LambdaExpr *E) {
+ CXXRecordDecl *FromClass = E->getLambdaClass();
+ auto ToClassOrErr = import(FromClass);
+ if (!ToClassOrErr)
+ return ToClassOrErr.takeError();
+ CXXRecordDecl *ToClass = *ToClassOrErr;
// NOTE: lambda classes are created with BeingDefined flag set up.
// It means that ImportDefinition doesn't work for them and we should fill it
// manually.
if (ToClass->isBeingDefined()) {
for (auto FromField : FromClass->fields()) {
- auto *ToField = cast_or_null<FieldDecl>(Importer.Import(FromField));
- if (!ToField)
- return nullptr;
+ auto ToFieldOrErr = import(FromField);
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
}
}
- auto *ToCallOp = dyn_cast_or_null<CXXMethodDecl>(
- Importer.Import(LE->getCallOperator()));
- if (!ToCallOp)
- return nullptr;
+ auto ToCallOpOrErr = import(E->getCallOperator());
+ if (!ToCallOpOrErr)
+ return ToCallOpOrErr.takeError();
ToClass->completeDefinition();
- unsigned NumCaptures = LE->capture_size();
- SmallVector<LambdaCapture, 8> Captures;
- Captures.reserve(NumCaptures);
- for (const auto &FromCapture : LE->captures()) {
- if (auto ToCapture = ImportLambdaCapture(FromCapture))
- Captures.push_back(*ToCapture);
+ SmallVector<LambdaCapture, 8> ToCaptures;
+ ToCaptures.reserve(E->capture_size());
+ for (const auto &FromCapture : E->captures()) {
+ if (auto ToCaptureOrErr = import(FromCapture))
+ ToCaptures.push_back(*ToCaptureOrErr);
else
- return nullptr;
+ return ToCaptureOrErr.takeError();
}
- SmallVector<Expr *, 8> InitCaptures(NumCaptures);
- if (ImportContainerChecked(LE->capture_inits(), InitCaptures))
- return nullptr;
+ SmallVector<Expr *, 8> ToCaptureInits(E->capture_size());
+ if (Error Err = ImportContainerChecked(E->capture_inits(), ToCaptureInits))
+ return std::move(Err);
- return LambdaExpr::Create(Importer.getToContext(), ToClass,
- Importer.Import(LE->getIntroducerRange()),
- LE->getCaptureDefault(),
- Importer.Import(LE->getCaptureDefaultLoc()),
- Captures,
- LE->hasExplicitParameters(),
- LE->hasExplicitResultType(),
- InitCaptures,
- Importer.Import(LE->getLocEnd()),
- LE->containsUnexpandedParameterPack());
+ auto Imp = importSeq(
+ E->getIntroducerRange(), E->getCaptureDefaultLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceRange ToIntroducerRange;
+ SourceLocation ToCaptureDefaultLoc, ToEndLoc;
+ std::tie(ToIntroducerRange, ToCaptureDefaultLoc, ToEndLoc) = *Imp;
+
+ return LambdaExpr::Create(
+ Importer.getToContext(), ToClass, ToIntroducerRange,
+ E->getCaptureDefault(), ToCaptureDefaultLoc, ToCaptures,
+ E->hasExplicitParameters(), E->hasExplicitResultType(), ToCaptureInits,
+ ToEndLoc, E->containsUnexpandedParameterPack());
}
-Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
- QualType T = Importer.Import(ILE->getType());
- if (T.isNull())
- return nullptr;
- SmallVector<Expr *, 4> Exprs(ILE->getNumInits());
- if (ImportContainerChecked(ILE->inits(), Exprs))
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitInitListExpr(InitListExpr *E) {
+ auto Imp = importSeq(E->getLBraceLoc(), E->getRBraceLoc(), E->getType());
+ if (!Imp)
+ return Imp.takeError();
+
+ SourceLocation ToLBraceLoc, ToRBraceLoc;
+ QualType ToType;
+ std::tie(ToLBraceLoc, ToRBraceLoc, ToType) = *Imp;
+
+ SmallVector<Expr *, 4> ToExprs(E->getNumInits());
+ if (Error Err = ImportContainerChecked(E->inits(), ToExprs))
+ return std::move(Err);
ASTContext &ToCtx = Importer.getToContext();
InitListExpr *To = new (ToCtx) InitListExpr(
- ToCtx, Importer.Import(ILE->getLBraceLoc()),
- Exprs, Importer.Import(ILE->getLBraceLoc()));
- To->setType(T);
+ ToCtx, ToLBraceLoc, ToExprs, ToRBraceLoc);
+ To->setType(ToType);
- if (ILE->hasArrayFiller()) {
- Expr *Filler = Importer.Import(ILE->getArrayFiller());
- if (!Filler)
- return nullptr;
- To->setArrayFiller(Filler);
+ if (E->hasArrayFiller()) {
+ if (ExpectedExpr ToFillerOrErr = import(E->getArrayFiller()))
+ To->setArrayFiller(*ToFillerOrErr);
+ else
+ return ToFillerOrErr.takeError();
}
- if (FieldDecl *FromFD = ILE->getInitializedFieldInUnion()) {
- auto *ToFD = cast_or_null<FieldDecl>(Importer.Import(FromFD));
- if (!ToFD)
- return nullptr;
- To->setInitializedFieldInUnion(ToFD);
+ if (FieldDecl *FromFD = E->getInitializedFieldInUnion()) {
+ if (auto ToFDOrErr = import(FromFD))
+ To->setInitializedFieldInUnion(*ToFDOrErr);
+ else
+ return ToFDOrErr.takeError();
}
- if (InitListExpr *SyntForm = ILE->getSyntacticForm()) {
- auto *ToSyntForm = cast_or_null<InitListExpr>(Importer.Import(SyntForm));
- if (!ToSyntForm)
- return nullptr;
- To->setSyntacticForm(ToSyntForm);
+ if (InitListExpr *SyntForm = E->getSyntacticForm()) {
+ if (auto ToSyntFormOrErr = import(SyntForm))
+ To->setSyntacticForm(*ToSyntFormOrErr);
+ else
+ return ToSyntFormOrErr.takeError();
}
- To->sawArrayRangeDesignator(ILE->hadArrayRangeDesignator());
- To->setValueDependent(ILE->isValueDependent());
- To->setInstantiationDependent(ILE->isInstantiationDependent());
+ // Copy InitListExprBitfields, which are not handled in the ctor of
+ // InitListExpr.
+ To->sawArrayRangeDesignator(E->hadArrayRangeDesignator());
return To;
}
-Expr *ASTNodeImporter::VisitCXXStdInitializerListExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXStdInitializerListExpr(
CXXStdInitializerListExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- Expr *SE = Importer.Import(E->getSubExpr());
- if (!SE)
- return nullptr;
+ ExpectedExpr ToSubExprOrErr = import(E->getSubExpr());
+ if (!ToSubExprOrErr)
+ return ToSubExprOrErr.takeError();
- return new (Importer.getToContext()) CXXStdInitializerListExpr(T, SE);
+ return new (Importer.getToContext()) CXXStdInitializerListExpr(
+ *ToTypeOrErr, *ToSubExprOrErr);
}
-Expr *ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
+ExpectedStmt ASTNodeImporter::VisitCXXInheritedCtorInitExpr(
CXXInheritedCtorInitExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ auto Imp = importSeq(E->getLocation(), E->getType(), E->getConstructor());
+ if (!Imp)
+ return Imp.takeError();
- auto *Ctor = cast_or_null<CXXConstructorDecl>(Importer.Import(
- E->getConstructor()));
- if (!Ctor)
- return nullptr;
+ SourceLocation ToLocation;
+ QualType ToType;
+ CXXConstructorDecl *ToConstructor;
+ std::tie(ToLocation, ToType, ToConstructor) = *Imp;
return new (Importer.getToContext()) CXXInheritedCtorInitExpr(
- Importer.Import(E->getLocation()), T, Ctor,
- E->constructsVBase(), E->inheritedFromVBase());
+ ToLocation, ToType, ToConstructor, E->constructsVBase(),
+ E->inheritedFromVBase());
}
-Expr *ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
-
- Expr *ToCommon = Importer.Import(E->getCommonExpr());
- if (!ToCommon && E->getCommonExpr())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ auto Imp = importSeq(E->getType(), E->getCommonExpr(), E->getSubExpr());
+ if (!Imp)
+ return Imp.takeError();
- Expr *ToSubExpr = Importer.Import(E->getSubExpr());
- if (!ToSubExpr && E->getSubExpr())
- return nullptr;
+ QualType ToType;
+ Expr *ToCommonExpr, *ToSubExpr;
+ std::tie(ToType, ToCommonExpr, ToSubExpr) = *Imp;
- return new (Importer.getToContext())
- ArrayInitLoopExpr(ToType, ToCommon, ToSubExpr);
+ return new (Importer.getToContext()) ArrayInitLoopExpr(
+ ToType, ToCommonExpr, ToSubExpr);
}
-Expr *ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
- return new (Importer.getToContext()) ArrayInitIndexExpr(ToType);
+ExpectedStmt ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ return new (Importer.getToContext()) ArrayInitIndexExpr(*ToTypeOrErr);
}
-Expr *ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
- auto *ToField = dyn_cast_or_null<FieldDecl>(Importer.Import(DIE->getField()));
- if (!ToField && DIE->getField())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ ExpectedSLoc ToBeginLocOrErr = import(E->getBeginLoc());
+ if (!ToBeginLocOrErr)
+ return ToBeginLocOrErr.takeError();
+
+ auto ToFieldOrErr = import(E->getField());
+ if (!ToFieldOrErr)
+ return ToFieldOrErr.takeError();
return CXXDefaultInitExpr::Create(
- Importer.getToContext(), Importer.Import(DIE->getLocStart()), ToField);
+ Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr);
}
-Expr *ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull() && !E->getType().isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getSubExpr(), E->getTypeInfoAsWritten(),
+ E->getOperatorLoc(), E->getRParenLoc(), E->getAngleBrackets());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ Expr *ToSubExpr;
+ TypeSourceInfo *ToTypeInfoAsWritten;
+ SourceLocation ToOperatorLoc, ToRParenLoc;
+ SourceRange ToAngleBrackets;
+ std::tie(
+ ToType, ToSubExpr, ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc,
+ ToAngleBrackets) = *Imp;
+
ExprValueKind VK = E->getValueKind();
CastKind CK = E->getCastKind();
- Expr *ToOp = Importer.Import(E->getSubExpr());
- if (!ToOp && E->getSubExpr())
- return nullptr;
- CXXCastPath BasePath;
- if (ImportCastPath(E, BasePath))
- return nullptr;
- TypeSourceInfo *ToWritten = Importer.Import(E->getTypeInfoAsWritten());
- SourceLocation ToOperatorLoc = Importer.Import(E->getOperatorLoc());
- SourceLocation ToRParenLoc = Importer.Import(E->getRParenLoc());
- SourceRange ToAngleBrackets = Importer.Import(E->getAngleBrackets());
+ auto ToBasePathOrErr = ImportCastPath(E);
+ if (!ToBasePathOrErr)
+ return ToBasePathOrErr.takeError();
if (isa<CXXStaticCastExpr>(E)) {
return CXXStaticCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else if (isa<CXXDynamicCastExpr>(E)) {
return CXXDynamicCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else if (isa<CXXReinterpretCastExpr>(E)) {
return CXXReinterpretCastExpr::Create(
- Importer.getToContext(), ToType, VK, CK, ToOp, &BasePath,
- ToWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ Importer.getToContext(), ToType, VK, CK, ToSubExpr, &(*ToBasePathOrErr),
+ ToTypeInfoAsWritten, ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
+ } else if (isa<CXXConstCastExpr>(E)) {
+ return CXXConstCastExpr::Create(
+ Importer.getToContext(), ToType, VK, ToSubExpr, ToTypeInfoAsWritten,
+ ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else {
- return nullptr;
+ llvm_unreachable("Unknown cast type");
+ return make_error<ImportError>();
}
}
-Expr *ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
+ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
- QualType T = Importer.Import(E->getType());
- if (T.isNull())
- return nullptr;
+ auto Imp = importSeq(
+ E->getType(), E->getExprLoc(), E->getParameter(), E->getReplacement());
+ if (!Imp)
+ return Imp.takeError();
- auto *Param = cast_or_null<NonTypeTemplateParmDecl>(
- Importer.Import(E->getParameter()));
- if (!Param)
- return nullptr;
-
- Expr *Replacement = Importer.Import(E->getReplacement());
- if (!Replacement)
- return nullptr;
+ QualType ToType;
+ SourceLocation ToExprLoc;
+ NonTypeTemplateParmDecl *ToParameter;
+ Expr *ToReplacement;
+ std::tie(ToType, ToExprLoc, ToParameter, ToReplacement) = *Imp;
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
- T, E->getValueKind(), Importer.Import(E->getExprLoc()), Param,
- Replacement);
+ ToType, E->getValueKind(), ToExprLoc, ToParameter, ToReplacement);
}
-Expr *ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ auto Imp = importSeq(
+ E->getType(), E->getBeginLoc(), E->getEndLoc());
+ if (!Imp)
+ return Imp.takeError();
+
+ QualType ToType;
+ SourceLocation ToBeginLoc, ToEndLoc;
+ std::tie(ToType, ToBeginLoc, ToEndLoc) = *Imp;
SmallVector<TypeSourceInfo *, 4> ToArgs(E->getNumArgs());
- if (ImportContainerChecked(E->getArgs(), ToArgs))
- return nullptr;
+ if (Error Err = ImportContainerChecked(E->getArgs(), ToArgs))
+ return std::move(Err);
// According to Sema::BuildTypeTrait(), if E is value-dependent,
// Value is always false.
- bool ToValue = false;
- if (!E->isValueDependent())
- ToValue = E->getValue();
+ bool ToValue = (E->isValueDependent() ? false : E->getValue());
return TypeTraitExpr::Create(
- Importer.getToContext(), ToType, Importer.Import(E->getLocStart()),
- E->getTrait(), ToArgs, Importer.Import(E->getLocEnd()), ToValue);
+ Importer.getToContext(), ToType, ToBeginLoc, E->getTrait(), ToArgs,
+ ToEndLoc, ToValue);
}
-Expr *ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
- QualType ToType = Importer.Import(E->getType());
- if (ToType.isNull())
- return nullptr;
+ExpectedStmt ASTNodeImporter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ ExpectedType ToTypeOrErr = import(E->getType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
- if (E->isTypeOperand()) {
- TypeSourceInfo *TSI = Importer.Import(E->getTypeOperandSourceInfo());
- if (!TSI)
- return nullptr;
+ auto ToSourceRangeOrErr = import(E->getSourceRange());
+ if (!ToSourceRangeOrErr)
+ return ToSourceRangeOrErr.takeError();
- return new (Importer.getToContext())
- CXXTypeidExpr(ToType, TSI, Importer.Import(E->getSourceRange()));
+ if (E->isTypeOperand()) {
+ if (auto ToTSIOrErr = import(E->getTypeOperandSourceInfo()))
+ return new (Importer.getToContext()) CXXTypeidExpr(
+ *ToTypeOrErr, *ToTSIOrErr, *ToSourceRangeOrErr);
+ else
+ return ToTSIOrErr.takeError();
}
- Expr *Op = Importer.Import(E->getExprOperand());
- if (!Op)
- return nullptr;
+ ExpectedExpr ToExprOperandOrErr = import(E->getExprOperand());
+ if (!ToExprOperandOrErr)
+ return ToExprOperandOrErr.takeError();
- return new (Importer.getToContext())
- CXXTypeidExpr(ToType, Op, Importer.Import(E->getSourceRange()));
+ return new (Importer.getToContext()) CXXTypeidExpr(
+ *ToTypeOrErr, *ToExprOperandOrErr, *ToSourceRangeOrErr);
}
void ASTNodeImporter::ImportOverrides(CXXMethodDecl *ToMethod,
CXXMethodDecl *FromMethod) {
- for (auto *FromOverriddenMethod : FromMethod->overridden_methods())
- ToMethod->addOverriddenMethod(
- cast<CXXMethodDecl>(Importer.Import(const_cast<CXXMethodDecl*>(
- FromOverriddenMethod))));
+ for (auto *FromOverriddenMethod : FromMethod->overridden_methods()) {
+ if (auto ImportedOrErr = import(FromOverriddenMethod))
+ ToMethod->getCanonicalDecl()->addOverriddenMethod(cast<CXXMethodDecl>(
+ (*ImportedOrErr)->getCanonicalDecl()));
+ else
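+      // If an overridden method cannot be imported, drop it from the override
+      // list rather than failing the whole import.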
+ consumeError(ImportedOrErr.takeError());
+ }
}
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager,
- bool MinimalImport)
- : ToContext(ToContext), FromContext(FromContext),
+ bool MinimalImport,
+ ASTImporterLookupTable *LookupTable)
+ : LookupTable(LookupTable), ToContext(ToContext), FromContext(FromContext),
ToFileManager(ToFileManager), FromFileManager(FromFileManager),
Minimal(MinimalImport) {
- ImportedDecls[FromContext.getTranslationUnitDecl()]
- = ToContext.getTranslationUnitDecl();
+
+ ImportedDecls[FromContext.getTranslationUnitDecl()] =
+ ToContext.getTranslationUnitDecl();
}
ASTImporter::~ASTImporter() = default;
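+// The Import_New functions wrap the corresponding Import overloads and turn a
+// failed import (a null or otherwise invalid result) into an llvm::Error for
+// callers of the Expected-based interface.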
+Expected<QualType> ASTImporter::Import_New(QualType FromT) {
+ QualType ToT = Import(FromT);
+ if (ToT.isNull() && !FromT.isNull())
+ return make_error<ImportError>();
+ return ToT;
+}
+
+Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
+ assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) &&
+      "Trying to get field index for a non-field.");
+
+ auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
+ if (!Owner)
+ return None;
+
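+  // Only field-like declarations (FieldDecl, IndirectFieldDecl) are counted
+  // when computing the index.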
+ unsigned Index = 0;
+ for (const auto *D : Owner->decls()) {
+ if (D == F)
+ return Index;
+
+ if (isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D))
+ ++Index;
+ }
+
+ llvm_unreachable("Field was not found in its parent context.");
+
+ return None;
+}
+
+ASTImporter::FoundDeclsTy
+ASTImporter::findDeclsInToCtx(DeclContext *DC, DeclarationName Name) {
+ // We search in the redecl context because of transparent contexts.
+ // E.g. a simple C language enum is a transparent context:
+ // enum E { A, B };
+ // Now if we had a global variable in the TU
+ // int A;
+  // then the enum constant 'A' and the variable 'A' would violate the ODR.
+ // We can diagnose this only if we search in the redecl context.
+ DeclContext *ReDC = DC->getRedeclContext();
+ if (LookupTable) {
+ ASTImporterLookupTable::LookupResult LookupResult =
+ LookupTable->lookup(ReDC, Name);
+ return FoundDeclsTy(LookupResult.begin(), LookupResult.end());
+ } else {
+    // FIXME: Can we remove this kind of lookup?
+    // Or does lldb really need this C/C++ lookup?
+ FoundDeclsTy Result;
+ ReDC->localUncachedLookup(Name, Result);
+ return Result;
+ }
+}
+
+void ASTImporter::AddToLookupTable(Decl *ToD) {
+ if (LookupTable)
+ if (auto *ToND = dyn_cast<NamedDecl>(ToD))
+ LookupTable->add(ToND);
+}
+
QualType ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return {};
- const Type *fromTy = FromT.getTypePtr();
+ const Type *FromTy = FromT.getTypePtr();
// Check whether we've already imported this type.
llvm::DenseMap<const Type *, const Type *>::iterator Pos
- = ImportedTypes.find(fromTy);
+ = ImportedTypes.find(FromTy);
if (Pos != ImportedTypes.end())
return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
// Import the type
ASTNodeImporter Importer(*this);
- QualType ToT = Importer.Visit(fromTy);
- if (ToT.isNull())
- return ToT;
+ ExpectedType ToTOrErr = Importer.Visit(FromTy);
+ if (!ToTOrErr) {
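+    // This legacy entry point cannot propagate errors, so consume the error
+    // and signal failure with a null type.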
+ llvm::consumeError(ToTOrErr.takeError());
+ return {};
+ }
// Record the imported type.
- ImportedTypes[fromTy] = ToT.getTypePtr();
+ ImportedTypes[FromTy] = (*ToTOrErr).getTypePtr();
- return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
+ return ToContext.getQualifiedType(*ToTOrErr, FromT.getLocalQualifiers());
}
+Expected<TypeSourceInfo *> ASTImporter::Import_New(TypeSourceInfo *FromTSI) {
+ TypeSourceInfo *ToTSI = Import(FromTSI);
+ if (!ToTSI && FromTSI)
+ return llvm::make_error<ImportError>();
+ return ToTSI;
+}
TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
if (!FromTSI)
return FromTSI;
@@ -6972,28 +7724,34 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
if (T.isNull())
return nullptr;
- return ToContext.getTrivialTypeSourceInfo(T,
- Import(FromTSI->getTypeLoc().getLocStart()));
+ return ToContext.getTrivialTypeSourceInfo(
+ T, Import(FromTSI->getTypeLoc().getBeginLoc()));
}
+Expected<Attr *> ASTImporter::Import_New(const Attr *FromAttr) {
+ return Import(FromAttr);
+}
Attr *ASTImporter::Import(const Attr *FromAttr) {
Attr *ToAttr = FromAttr->clone(ToContext);
+ // NOTE: Import of SourceRange may fail.
ToAttr->setRange(Import(FromAttr->getRange()));
return ToAttr;
}
-Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
- llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
- if (Pos != ImportedDecls.end()) {
- Decl *ToD = Pos->second;
- // FIXME: move this call to ImportDeclParts().
- ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
- return ToD;
- } else {
+Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const {
+ auto Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end())
+ return Pos->second;
+ else
return nullptr;
- }
}
+Expected<Decl *> ASTImporter::Import_New(Decl *FromD) {
+ Decl *ToD = Import(FromD);
+ if (!ToD && FromD)
+ return llvm::make_error<ImportError>();
+ return ToD;
+}
Decl *ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
@@ -7009,17 +7767,26 @@ Decl *ASTImporter::Import(Decl *FromD) {
}
// Import the type.
- ToD = Importer.Visit(FromD);
- if (!ToD)
+ ExpectedDecl ToDOrErr = Importer.Visit(FromD);
+ if (!ToDOrErr) {
+ llvm::consumeError(ToDOrErr.takeError());
return nullptr;
+ }
+ ToD = *ToDOrErr;
+
+  // Once the decl is connected to the existing declarations, i.e. once the
+  // redecl chain is properly set up, we populate the lookup table again.
+  // This way the primary context will be able to find all decls.
+ AddToLookupTable(ToD);
// Notify subclasses.
Imported(FromD, ToD);
+ updateFlags(FromD, ToD);
return ToD;
}
-DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
+Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
@@ -7034,8 +7801,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToRecord->isCompleteDefinition()) {
// Do nothing.
} else if (FromRecord->isCompleteDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromRecord, ToRecord, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToRecord);
}
@@ -7044,8 +7812,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToEnum->isCompleteDefinition()) {
// Do nothing.
} else if (FromEnum->isCompleteDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromEnum, ToEnum, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToEnum);
}
@@ -7054,8 +7823,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToClass->getDefinition()) {
// Do nothing.
} else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromDef, ToClass, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToClass);
}
@@ -7064,8 +7834,9 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
if (ToProto->getDefinition()) {
// Do nothing.
} else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
- ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
- ASTNodeImporter::IDK_Basic);
+ if (Error Err = ASTNodeImporter(*this).ImportDefinition(
+ FromDef, ToProto, ASTNodeImporter::IDK_Basic))
+ return std::move(Err);
} else {
CompleteDecl(ToProto);
}
@@ -7074,6 +7845,12 @@ DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
return ToDC;
}
+Expected<Expr *> ASTImporter::Import_New(Expr *FromE) {
+ Expr *ToE = Import(FromE);
+ if (!ToE && FromE)
+ return llvm::make_error<ImportError>();
+ return ToE;
+}
Expr *ASTImporter::Import(Expr *FromE) {
if (!FromE)
return nullptr;
@@ -7081,6 +7858,12 @@ Expr *ASTImporter::Import(Expr *FromE) {
return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
}
+Expected<Stmt *> ASTImporter::Import_New(Stmt *FromS) {
+ Stmt *ToS = Import(FromS);
+ if (!ToS && FromS)
+ return llvm::make_error<ImportError>();
+ return ToS;
+}
Stmt *ASTImporter::Import(Stmt *FromS) {
if (!FromS)
return nullptr;
@@ -7090,17 +7873,39 @@ Stmt *ASTImporter::Import(Stmt *FromS) {
if (Pos != ImportedStmts.end())
return Pos->second;
- // Import the type
+ // Import the statement.
ASTNodeImporter Importer(*this);
- Stmt *ToS = Importer.Visit(FromS);
- if (!ToS)
+ ExpectedStmt ToSOrErr = Importer.Visit(FromS);
+ if (!ToSOrErr) {
+ llvm::consumeError(ToSOrErr.takeError());
return nullptr;
+ }
+
+ if (auto *ToE = dyn_cast<Expr>(*ToSOrErr)) {
+ auto *FromE = cast<Expr>(FromS);
+ // Copy ExprBitfields, which may not be handled in Expr subclasses
+ // constructors.
+ ToE->setValueKind(FromE->getValueKind());
+ ToE->setObjectKind(FromE->getObjectKind());
+ ToE->setTypeDependent(FromE->isTypeDependent());
+ ToE->setValueDependent(FromE->isValueDependent());
+ ToE->setInstantiationDependent(FromE->isInstantiationDependent());
+ ToE->setContainsUnexpandedParameterPack(
+ FromE->containsUnexpandedParameterPack());
+ }
// Record the imported declaration.
- ImportedStmts[FromS] = ToS;
- return ToS;
+ ImportedStmts[FromS] = *ToSOrErr;
+ return *ToSOrErr;
}
+Expected<NestedNameSpecifier *>
+ASTImporter::Import_New(NestedNameSpecifier *FromNNS) {
+ NestedNameSpecifier *ToNNS = Import(FromNNS);
+ if (!ToNNS && FromNNS)
+ return llvm::make_error<ImportError>();
+ return ToNNS;
+}
NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
if (!FromNNS)
return nullptr;
@@ -7154,6 +7959,11 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
llvm_unreachable("Invalid nested name specifier kind");
}
+Expected<NestedNameSpecifierLoc>
+ASTImporter::Import_New(NestedNameSpecifierLoc FromNNS) {
+ NestedNameSpecifierLoc ToNNS = Import(FromNNS);
+ return ToNNS;
+}
NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
// Copied from NestedNameSpecifier mostly.
SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
@@ -7225,6 +8035,12 @@ NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
return Builder.getWithLocInContext(getToContext());
}
+Expected<TemplateName> ASTImporter::Import_New(TemplateName From) {
+ TemplateName To = Import(From);
+ if (To.isNull() && !From.isNull())
+ return llvm::make_error<ImportError>();
+ return To;
+}
TemplateName ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
@@ -7301,18 +8117,26 @@ TemplateName ASTImporter::Import(TemplateName From) {
return {};
ASTNodeImporter Importer(*this);
- TemplateArgument ArgPack
+ Expected<TemplateArgument> ArgPack
= Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
- if (ArgPack.isNull())
+ if (!ArgPack) {
+ llvm::consumeError(ArgPack.takeError());
return {};
+ }
- return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ return ToContext.getSubstTemplateTemplateParmPack(Param, *ArgPack);
}
}
llvm_unreachable("Invalid template name kind");
}
+Expected<SourceLocation> ASTImporter::Import_New(SourceLocation FromLoc) {
+ SourceLocation ToLoc = Import(FromLoc);
+ if (ToLoc.isInvalid() && !FromLoc.isInvalid())
+ return llvm::make_error<ImportError>();
+ return ToLoc;
+}
SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
if (FromLoc.isInvalid())
return {};
@@ -7327,10 +8151,20 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
return ToSM.getComposedLoc(ToFileID, Decomposed.second);
}
+Expected<SourceRange> ASTImporter::Import_New(SourceRange FromRange) {
+ SourceRange ToRange = Import(FromRange);
+ return ToRange;
+}
SourceRange ASTImporter::Import(SourceRange FromRange) {
return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
}
+Expected<FileID> ASTImporter::Import_New(FileID FromID) {
+ FileID ToID = Import(FromID);
+ if (ToID.isInvalid() && FromID.isValid())
+ return llvm::make_error<ImportError>();
+ return ToID;
+}
FileID ASTImporter::Import(FileID FromID) {
llvm::DenseMap<FileID, FileID>::iterator Pos = ImportedFileIDs.find(FromID);
if (Pos != ImportedFileIDs.end())
@@ -7388,6 +8222,13 @@ FileID ASTImporter::Import(FileID FromID) {
return ToID;
}
+Expected<CXXCtorInitializer *>
+ASTImporter::Import_New(CXXCtorInitializer *From) {
+ CXXCtorInitializer *To = Import(From);
+ if (!To && From)
+ return llvm::make_error<ImportError>();
+ return To;
+}
CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
Expr *ToExpr = Import(From->getInit());
if (!ToExpr && From->getInit())
@@ -7433,6 +8274,13 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
}
}
+Expected<CXXBaseSpecifier *>
+ASTImporter::Import_New(const CXXBaseSpecifier *From) {
+ CXXBaseSpecifier *To = Import(From);
+ if (!To && From)
+ return llvm::make_error<ImportError>();
+ return To;
+}
CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec);
if (Pos != ImportedCXXBaseSpecifiers.end())
@@ -7448,50 +8296,62 @@ CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
return Imported;
}
-void ASTImporter::ImportDefinition(Decl *From) {
+Error ASTImporter::ImportDefinition_New(Decl *From) {
Decl *To = Import(From);
if (!To)
- return;
+ return llvm::make_error<ImportError>();
if (auto *FromDC = cast<DeclContext>(From)) {
ASTNodeImporter Importer(*this);
if (auto *ToRecord = dyn_cast<RecordDecl>(To)) {
if (!ToRecord->getDefinition()) {
- Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToEnum = dyn_cast<EnumDecl>(To)) {
if (!ToEnum->getDefinition()) {
- Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<EnumDecl>(FromDC), ToEnum, ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
if (!ToIFace->getDefinition()) {
- Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
}
}
if (auto *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
if (!ToProto->getDefinition()) {
- Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
- ASTNodeImporter::IDK_Everything);
- return;
+ return Importer.ImportDefinition(
+ cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
}
}
- Importer.ImportDeclContext(FromDC, true);
+ return Importer.ImportDeclContext(FromDC, true);
}
+
+ return Error::success();
}
+void ASTImporter::ImportDefinition(Decl *From) {
+ Error Err = ImportDefinition_New(From);
+ llvm::consumeError(std::move(Err));
+}
+
+Expected<DeclarationName> ASTImporter::Import_New(DeclarationName FromName) {
+ DeclarationName ToName = Import(FromName);
+ if (!ToName && FromName)
+ return llvm::make_error<ImportError>();
+ return ToName;
+}
DeclarationName ASTImporter::Import(DeclarationName FromName) {
if (!FromName)
return {};
@@ -7568,6 +8428,12 @@ IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
return ToId;
}
+Expected<Selector> ASTImporter::Import_New(Selector FromSel) {
+ Selector ToSel = Import(FromSel);
+ if (ToSel.isNull() && !FromSel.isNull())
+ return llvm::make_error<ImportError>();
+ return ToSel;
+}
Selector ASTImporter::Import(Selector FromSel) {
if (FromSel.isNull())
return {};
diff --git a/lib/AST/ASTImporterLookupTable.cpp b/lib/AST/ASTImporterLookupTable.cpp
new file mode 100644
index 000000000000..fbcd4f5cb341
--- /dev/null
+++ b/lib/AST/ASTImporterLookupTable.cpp
@@ -0,0 +1,129 @@
+//===- ASTImporterLookupTable.cpp - ASTImporter specific lookup -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImporterLookupTable class which implements a
+// lookup procedure for the import mechanism.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTImporterLookupTable.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+
+namespace clang {
+
+namespace {
+
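+// Walks an AST subtree and registers every NamedDecl it encounters in the
+// given lookup table.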
+struct Builder : RecursiveASTVisitor<Builder> {
+ ASTImporterLookupTable &LT;
+ Builder(ASTImporterLookupTable &LT) : LT(LT) {}
+ bool VisitNamedDecl(NamedDecl *D) {
+ LT.add(D);
+ return true;
+ }
+ bool VisitFriendDecl(FriendDecl *D) {
+ if (D->getFriendType()) {
+ QualType Ty = D->getFriendType()->getType();
+      // FIXME: Can this be other than an ElaboratedType?
+ QualType NamedTy = cast<ElaboratedType>(Ty)->getNamedType();
+ if (!NamedTy->isDependentType()) {
+ if (const auto *RTy = dyn_cast<RecordType>(NamedTy))
+ LT.add(RTy->getAsCXXRecordDecl());
+ else if (const auto *SpecTy =
+ dyn_cast<TemplateSpecializationType>(NamedTy)) {
+ LT.add(SpecTy->getAsCXXRecordDecl());
+ }
+ }
+ }
+ return true;
+ }
+
+ // Override default settings of base.
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return true; }
+};
+
+} // anonymous namespace
+
+ASTImporterLookupTable::ASTImporterLookupTable(TranslationUnitDecl &TU) {
+ Builder B(*this);
+ B.TraverseDecl(&TU);
+}
+
+void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) {
+ DeclList &Decls = LookupTable[DC][ND->getDeclName()];
+ // Inserts if and only if there is no element in the container equal to it.
+ Decls.insert(ND);
+}
+
+void ASTImporterLookupTable::remove(DeclContext *DC, NamedDecl *ND) {
+ DeclList &Decls = LookupTable[DC][ND->getDeclName()];
+ bool EraseResult = Decls.remove(ND);
+ (void)EraseResult;
+  assert(EraseResult == true && "Trying to remove a Decl that is not in the table");
+}
+
+void ASTImporterLookupTable::add(NamedDecl *ND) {
+ assert(ND);
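+  // Register the decl in its primary semantic context and, if that differs,
+  // also in its redeclaration context, so lookups through either succeed.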
+ DeclContext *DC = ND->getDeclContext()->getPrimaryContext();
+ add(DC, ND);
+ DeclContext *ReDC = DC->getRedeclContext()->getPrimaryContext();
+ if (DC != ReDC)
+ add(ReDC, ND);
+}
+
+void ASTImporterLookupTable::remove(NamedDecl *ND) {
+ assert(ND);
+ DeclContext *DC = ND->getDeclContext()->getPrimaryContext();
+ remove(DC, ND);
+ DeclContext *ReDC = DC->getRedeclContext()->getPrimaryContext();
+ if (DC != ReDC)
+ remove(ReDC, ND);
+}
+
+ASTImporterLookupTable::LookupResult
+ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
+ auto DCI = LookupTable.find(DC->getPrimaryContext());
+ if (DCI == LookupTable.end())
+ return {};
+
+ const auto &FoundNameMap = DCI->second;
+ auto NamesI = FoundNameMap.find(Name);
+ if (NamesI == FoundNameMap.end())
+ return {};
+
+ return NamesI->second;
+}
+
+void ASTImporterLookupTable::dump(DeclContext *DC) const {
+ auto DCI = LookupTable.find(DC->getPrimaryContext());
+  if (DCI == LookupTable.end()) {
+    llvm::errs() << "empty\n";
+    return;
+  }
+ const auto &FoundNameMap = DCI->second;
+ for (const auto &Entry : FoundNameMap) {
+ DeclarationName Name = Entry.first;
+ llvm::errs() << "==== Name: ";
+ Name.dump();
+ const DeclList& List = Entry.second;
+ for (NamedDecl *ND : List) {
+ ND->dump();
+ }
+ }
+}
+
+void ASTImporterLookupTable::dump() const {
+ for (const auto &Entry : LookupTable) {
+ DeclContext *DC = Entry.first;
+ StringRef Primary = DC->getPrimaryContext() ? " primary" : "";
+ llvm::errs() << "== DC:" << cast<Decl>(DC) << Primary << "\n";
+ dump(DC);
+ }
+}
+
+} // namespace clang
diff --git a/lib/AST/ASTStructuralEquivalence.cpp b/lib/AST/ASTStructuralEquivalence.cpp
index df3c2be9a2e2..d19b89bb95b4 100644
--- a/lib/AST/ASTStructuralEquivalence.cpp
+++ b/lib/AST/ASTStructuralEquivalence.cpp
@@ -911,7 +911,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
-/// Determine structural equivalence of two methodss.
+/// Determine structural equivalence of two methods.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
CXXMethodDecl *Method1,
CXXMethodDecl *Method2) {
@@ -1016,14 +1016,15 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
// Compare the definitions of these two records. If either or both are
- // incomplete, we assume that they are equivalent.
+ // incomplete (i.e. it is a forward decl), we assume that they are
+ // equivalent.
D1 = D1->getDefinition();
D2 = D2->getDefinition();
if (!D1 || !D2)
return true;
// If any of the records has external storage and we do a minimal check (or
- // AST import) we assmue they are equivalent. (If we didn't have this
+ // AST import) we assume they are equivalent. (If we didn't have this
// assumption then `RecordDecl::LoadFieldsFromExternalStorage` could trigger
// another AST import which in turn would call the structural equivalency
// check again and finally we'd have an improper result.)
@@ -1031,6 +1032,11 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (D1->hasExternalLexicalStorage() || D2->hasExternalLexicalStorage())
return true;
+ // If one definition is currently being defined, we do not compare for
+ // equality and we assume that the decls are equal.
+ if (D1->isBeingDefined() || D2->isBeingDefined())
+ return true;
+
if (auto *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
if (auto *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
if (D1CXX->hasExternalLexicalStorage() &&
@@ -1061,9 +1067,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
diag::warn_odr_tag_type_inconsistent)
<< Context.ToCtx.getTypeDeclType(D2);
- Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
+ Context.Diag2(Base2->getBeginLoc(), diag::note_odr_base)
<< Base2->getType() << Base2->getSourceRange();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
<< Base1->getType() << Base1->getSourceRange();
}
return false;
@@ -1075,9 +1081,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(),
diag::warn_odr_tag_type_inconsistent)
<< Context.ToCtx.getTypeDeclType(D2);
- Context.Diag2(Base2->getLocStart(), diag::note_odr_virtual_base)
+ Context.Diag2(Base2->getBeginLoc(), diag::note_odr_virtual_base)
<< Base2->isVirtual() << Base2->getSourceRange();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
<< Base1->isVirtual() << Base1->getSourceRange();
}
return false;
@@ -1126,7 +1132,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
<< Context.ToCtx.getTypeDeclType(D2);
const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
- Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ Context.Diag1(Base1->getBeginLoc(), diag::note_odr_base)
<< Base1->getType() << Base1->getSourceRange();
Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
}
@@ -1178,6 +1184,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
/// Determine structural equivalence of two enums.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EnumDecl *D1, EnumDecl *D2) {
+
+ // Compare the definitions of these two enums. If either or both are
+ // incomplete (i.e. forward declared), we assume that they are equivalent.
+ D1 = D1->getDefinition();
+ D2 = D2->getDefinition();
+ if (!D1 || !D2)
+ return true;
+
EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
EC2End = D2->enumerator_end();
for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
@@ -1497,6 +1511,141 @@ bool StructuralEquivalenceContext::IsEquivalent(QualType T1, QualType T2) {
return !Finish();
}
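+// Check the equivalence properties that are common to every declaration kind;
+// at the moment only the described templates, if any, are compared.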
+bool StructuralEquivalenceContext::CheckCommonEquivalence(Decl *D1, Decl *D2) {
+ // Check for equivalent described template.
+ TemplateDecl *Template1 = D1->getDescribedTemplate();
+ TemplateDecl *Template2 = D2->getDescribedTemplate();
+ if ((Template1 != nullptr) != (Template2 != nullptr))
+ return false;
+ if (Template1 && !IsStructurallyEquivalent(*this, Template1, Template2))
+ return false;
+
+ // FIXME: Move check for identifier names into this function.
+
+ return true;
+}
+
+bool StructuralEquivalenceContext::CheckKindSpecificEquivalence(
+ Decl *D1, Decl *D2) {
+ // FIXME: Switch on all declaration kinds. For now, we're just going to
+ // check the obvious ones.
+ if (auto *Record1 = dyn_cast<RecordDecl>(D1)) {
+ if (auto *Record2 = dyn_cast<RecordDecl>(D2)) {
+ // Check for equivalent structure names.
+ IdentifierInfo *Name1 = Record1->getIdentifier();
+ if (!Name1 && Record1->getTypedefNameForAnonDecl())
+ Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Record2->getIdentifier();
+ if (!Name2 && Record2->getTypedefNameForAnonDecl())
+ Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Record1, Record2))
+ return false;
+ } else {
+ // Record/non-record mismatch.
+ return false;
+ }
+ } else if (auto *Enum1 = dyn_cast<EnumDecl>(D1)) {
+ if (auto *Enum2 = dyn_cast<EnumDecl>(D2)) {
+ // Check for equivalent enum names.
+ IdentifierInfo *Name1 = Enum1->getIdentifier();
+ if (!Name1 && Enum1->getTypedefNameForAnonDecl())
+ Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Enum2->getIdentifier();
+ if (!Name2 && Enum2->getTypedefNameForAnonDecl())
+ Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Enum1, Enum2))
+ return false;
+ } else {
+ // Enum/non-enum mismatch
+ return false;
+ }
+ } else if (const auto *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
+ if (const auto *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
+ Typedef2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this, Typedef1->getUnderlyingType(),
+ Typedef2->getUnderlyingType()))
+ return false;
+ } else {
+ // Typedef/non-typedef mismatch.
+ return false;
+ }
+ } else if (auto *ClassTemplate1 = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (auto *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, ClassTemplate1,
+ ClassTemplate2))
+ return false;
+ } else {
+ // Class template/non-class-template mismatch.
+ return false;
+ }
+ } else if (auto *FunctionTemplate1 = dyn_cast<FunctionTemplateDecl>(D1)) {
+ if (auto *FunctionTemplate2 = dyn_cast<FunctionTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, FunctionTemplate1,
+ FunctionTemplate2))
+ return false;
+ } else {
+      // Function template/non-function-template mismatch.
+ return false;
+ }
+ } else if (auto *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (auto *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ } else if (auto *NTTP1 = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (auto *NTTP2 = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ } else if (auto *TTP1 = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (auto *TTP2 = dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ } else if (auto *MD1 = dyn_cast<CXXMethodDecl>(D1)) {
+ if (auto *MD2 = dyn_cast<CXXMethodDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, MD1, MD2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ } else if (FunctionDecl *FD1 = dyn_cast<FunctionDecl>(D1)) {
+ if (FunctionDecl *FD2 = dyn_cast<FunctionDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(FD1->getIdentifier(),
+ FD2->getIdentifier()))
+ return false;
+ if (!::IsStructurallyEquivalent(*this, FD1, FD2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ } else if (FriendDecl *FrD1 = dyn_cast<FriendDecl>(D1)) {
+ if (FriendDecl *FrD2 = dyn_cast<FriendDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, FrD1, FrD2))
+ return false;
+ } else {
+ // Kind mismatch.
+ return false;
+ }
+ }
+
+ return true;
+}
+
bool StructuralEquivalenceContext::Finish() {
while (!DeclsToCheck.empty()) {
// Check the next declaration.
@@ -1506,123 +1655,8 @@ bool StructuralEquivalenceContext::Finish() {
Decl *D2 = TentativeEquivalences[D1];
assert(D2 && "Unrecorded tentative equivalence?");
- bool Equivalent = true;
-
- // FIXME: Switch on all declaration kinds. For now, we're just going to
- // check the obvious ones.
- if (auto *Record1 = dyn_cast<RecordDecl>(D1)) {
- if (auto *Record2 = dyn_cast<RecordDecl>(D2)) {
- // Check for equivalent structure names.
- IdentifierInfo *Name1 = Record1->getIdentifier();
- if (!Name1 && Record1->getTypedefNameForAnonDecl())
- Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = Record2->getIdentifier();
- if (!Name2 && Record2->getTypedefNameForAnonDecl())
- Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!::IsStructurallyEquivalent(Name1, Name2) ||
- !::IsStructurallyEquivalent(*this, Record1, Record2))
- Equivalent = false;
- } else {
- // Record/non-record mismatch.
- Equivalent = false;
- }
- } else if (auto *Enum1 = dyn_cast<EnumDecl>(D1)) {
- if (auto *Enum2 = dyn_cast<EnumDecl>(D2)) {
- // Check for equivalent enum names.
- IdentifierInfo *Name1 = Enum1->getIdentifier();
- if (!Name1 && Enum1->getTypedefNameForAnonDecl())
- Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = Enum2->getIdentifier();
- if (!Name2 && Enum2->getTypedefNameForAnonDecl())
- Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!::IsStructurallyEquivalent(Name1, Name2) ||
- !::IsStructurallyEquivalent(*this, Enum1, Enum2))
- Equivalent = false;
- } else {
- // Enum/non-enum mismatch
- Equivalent = false;
- }
- } else if (const auto *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
- if (const auto *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
- if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
- Typedef2->getIdentifier()) ||
- !::IsStructurallyEquivalent(*this, Typedef1->getUnderlyingType(),
- Typedef2->getUnderlyingType()))
- Equivalent = false;
- } else {
- // Typedef/non-typedef mismatch.
- Equivalent = false;
- }
- } else if (auto *ClassTemplate1 = dyn_cast<ClassTemplateDecl>(D1)) {
- if (auto *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, ClassTemplate1,
- ClassTemplate2))
- Equivalent = false;
- } else {
- // Class template/non-class-template mismatch.
- Equivalent = false;
- }
- } else if (auto *FunctionTemplate1 = dyn_cast<FunctionTemplateDecl>(D1)) {
- if (auto *FunctionTemplate2 = dyn_cast<FunctionTemplateDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, FunctionTemplate1,
- FunctionTemplate2))
- Equivalent = false;
- } else {
- // Class template/non-class-template mismatch.
- Equivalent = false;
- }
- } else if (auto *TTP1 = dyn_cast<TemplateTypeParmDecl>(D1)) {
- if (auto *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- } else if (auto *NTTP1 = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
- if (auto *NTTP2 = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- } else if (auto *TTP1 = dyn_cast<TemplateTemplateParmDecl>(D1)) {
- if (auto *TTP2 = dyn_cast<TemplateTemplateParmDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- } else if (auto *MD1 = dyn_cast<CXXMethodDecl>(D1)) {
- if (auto *MD2 = dyn_cast<CXXMethodDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, MD1, MD2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- } else if (FunctionDecl *FD1 = dyn_cast<FunctionDecl>(D1)) {
- if (FunctionDecl *FD2 = dyn_cast<FunctionDecl>(D2)) {
- if (!::IsStructurallyEquivalent(FD1->getIdentifier(),
- FD2->getIdentifier()))
- Equivalent = false;
- if (!::IsStructurallyEquivalent(*this, FD1, FD2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- } else if (FriendDecl *FrD1 = dyn_cast<FriendDecl>(D1)) {
- if (FriendDecl *FrD2 = dyn_cast<FriendDecl>(D2)) {
- if (!::IsStructurallyEquivalent(*this, FrD1, FrD2))
- Equivalent = false;
- } else {
- // Kind mismatch.
- Equivalent = false;
- }
- }
+ bool Equivalent =
+ CheckCommonEquivalence(D1, D2) && CheckKindSpecificEquivalence(D1, D2);
if (!Equivalent) {
// Note that these two declarations are not equivalent (and we already
@@ -1631,7 +1665,6 @@ bool StructuralEquivalenceContext::Finish() {
std::make_pair(D1->getCanonicalDecl(), D2->getCanonicalDecl()));
return true;
}
- // FIXME: Check other declaration kinds!
}
return false;
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 4f868a3af59e..570ca718acf5 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -10,6 +10,7 @@ add_clang_library(clangAST
ASTDiagnostic.cpp
ASTDumper.cpp
ASTImporter.cpp
+ ASTImporterLookupTable.cpp
ASTStructuralEquivalence.cpp
ASTTypeTraits.cpp
AttrImpl.cpp
@@ -39,6 +40,7 @@ add_clang_library(clangAST
ExprObjC.cpp
ExternalASTMerger.cpp
ExternalASTSource.cpp
+ FormatString.cpp
InheritViz.cpp
ItaniumCXXABI.cpp
ItaniumMangle.cpp
@@ -48,12 +50,15 @@ add_clang_library(clangAST
NestedNameSpecifier.cpp
NSAPI.cpp
ODRHash.cpp
+ OSLog.cpp
OpenMPClause.cpp
ParentMap.cpp
+ PrintfFormatString.cpp
QualTypeNames.cpp
RawCommentList.cpp
RecordLayout.cpp
RecordLayoutBuilder.cpp
+ ScanfFormatString.cpp
SelectorLocationsKind.cpp
Stmt.cpp
StmtCXX.cpp
@@ -65,6 +70,7 @@ add_clang_library(clangAST
StmtViz.cpp
TemplateBase.cpp
TemplateName.cpp
+ TextNodeDumper.cpp
Type.cpp
TypeLoc.cpp
TypePrinter.cpp
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index a0b22b6a85e0..ddb350e72bbd 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -103,7 +103,6 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
- // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
return lookupInBases(
[BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
return FindBaseClass(Specifier, Path, BaseDecl);
@@ -124,7 +123,6 @@ bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
- // FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
return lookupInBases(
[BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
return FindVirtualBaseClass(Specifier, Path, BaseDecl);
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 8d401ff5c7ea..c43275318dd7 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -21,7 +21,7 @@ namespace comments {
void Token::dump(const Lexer &L, const SourceManager &SM) const {
llvm::errs() << "comments::Token Kind=" << Kind << " ";
- Loc.dump(SM);
+ Loc.print(llvm::errs(), SM);
llvm::errs() << " " << Length << " \"" << L.getSpelling(*this, SM) << "\"\n";
}
diff --git a/lib/AST/CommentParser.cpp b/lib/AST/CommentParser.cpp
index c1c04239f58e..7f70b95e9812 100644
--- a/lib/AST/CommentParser.cpp
+++ b/lib/AST/CommentParser.cpp
@@ -558,7 +558,6 @@ BlockContentComment *Parser::parseParagraphOrBlockCommand() {
case tok::verbatim_block_begin:
case tok::verbatim_line_name:
case tok::eof:
- assert(Content.size() != 0);
break; // Block content or EOF ahead, finish this paragraph.
case tok::unknown_command:
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index 4bc98bf10765..88588a7a89e6 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -703,10 +703,9 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
SmallString<64> TextToInsert(" ");
TextToInsert += AttributeSpelling;
- Diag(FD->getLocEnd(),
- diag::note_add_deprecation_attr)
- << FixItHint::CreateInsertion(FD->getLocEnd().getLocWithOffset(1),
- TextToInsert);
+ Diag(FD->getEndLoc(), diag::note_add_deprecation_attr)
+ << FixItHint::CreateInsertion(FD->getEndLoc().getLocWithOffset(1),
+ TextToInsert);
}
}
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 8030dd0c2f41..5536358b1ecf 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Decl.h"
#include "Linkage.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CanonicalType.h"
@@ -49,7 +50,6 @@
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Visibility.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
@@ -725,7 +725,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
if (useInlineVisibilityHidden(D))
- LV.mergeVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
}
@@ -915,7 +915,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// Note that we do this before merging information about
// the class visibility.
if (!LV.isVisibilityExplicit() && useInlineVisibilityHidden(D))
- LV.mergeVisibility(HiddenVisibility, true);
+ LV.mergeVisibility(HiddenVisibility, /*visibilityExplicit=*/false);
}
// If this class member has an explicit visibility attribute, the only
@@ -1262,7 +1262,27 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
!isTemplateInstantiation(FD->getTemplateSpecializationKind()))
return LinkageInfo::none();
+ // If a function is hidden by -fvisibility-inlines-hidden option and
+ // is not explicitly attributed as a hidden function,
+ // we should not make static local variables in the function hidden.
LV = getLVForDecl(FD, computation);
+ if (isa<VarDecl>(D) && useInlineVisibilityHidden(FD) &&
+ !LV.isVisibilityExplicit()) {
+ assert(cast<VarDecl>(D)->isStaticLocal());
+ // If this was an implicitly hidden inline method, check again for
+ // explicit visibility on the parent class, and use that for static locals
+ // if present.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ LV = getLVForDecl(MD->getParent(), computation);
+ if (!LV.isVisibilityExplicit()) {
+ Visibility globalVisibility =
+ computation.isValueVisibility()
+ ? Context.getLangOpts().getValueVisibilityMode()
+ : Context.getLangOpts().getTypeVisibilityMode();
+ return LinkageInfo(VisibleNoLinkage, globalVisibility,
+ /*visibilityExplicit=*/false);
+ }
+ }
}
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo::none();
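Illustration (not part of the patch) of what the hunk above protects: under -fvisibility-inlines-hidden an inline function may itself get hidden visibility, but a static local inside it should keep the default visibility so every shared object updates a single instance. A minimal sketch:

    // header included from several shared libraries, built with
    // -fvisibility-inlines-hidden
    inline int nextId() {
      // The function symbol may become hidden, but 'counter' must remain
      // default-visible so all copies of nextId() see the same variable.
      static int counter = 0;
      return ++counter;
    }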
@@ -1937,7 +1957,7 @@ VarDecl::TLSKind VarDecl::getTLSKind() const {
SourceRange VarDecl::getSourceRange() const {
if (const Expr *Init = getInit()) {
- SourceLocation InitEnd = Init->getLocEnd();
+ SourceLocation InitEnd = Init->getEndLoc();
// If Init is implicit, ignore its source range and fallback on
// DeclaratorDecl::getSourceRange() to handle postfix elements.
if (InitEnd.isValid() && InitEnd != getLocation())
@@ -2351,6 +2371,14 @@ static DeclT *getDefinitionOrSelf(DeclT *D) {
return D;
}
+bool VarDecl::isEscapingByref() const {
+ return hasAttr<BlocksAttr>() && NonParmVarDeclBits.EscapingByref;
+}
+
+bool VarDecl::isNonEscapingByref() const {
+ return hasAttr<BlocksAttr>() && !NonParmVarDeclBits.EscapingByref;
+}
+
VarDecl *VarDecl::getTemplateInstantiationPattern() const {
// If it's a variable template specialization, find the template or partial
// specialization from which it was instantiated.
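For context on the new isEscapingByref()/isNonEscapingByref() accessors above, a rough sketch assuming Clang's blocks extension (-fblocks) and a hypothetical enqueue() helper that copies its block argument:

    void enqueue(void (^work)(void));   // hypothetical: copies the block to the heap

    void example() {
      __block int total = 0;
      enqueue(^{ total += 1; });        // the copied block may outlive example(),
                                        // so 'total' is treated as escaping
      ^{ total += 2; }();               // invoked in place; on its own this use
                                        // would leave 'total' non-escaping
    }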
@@ -2441,7 +2469,7 @@ bool VarDecl::isKnownToBeDefined() const {
//
// With CUDA relocatable device code enabled, these variables don't get
// special handling; they're treated like regular extern variables.
- if (LangOpts.CUDA && !LangOpts.CUDARelocatableDeviceCode &&
+ if (LangOpts.CUDA && !LangOpts.GPURelocatableDeviceCode &&
hasExternalStorage() && hasAttr<CUDASharedAttr>() &&
isa<IncompleteArrayType>(getType()))
return true;
@@ -2449,6 +2477,12 @@ bool VarDecl::isKnownToBeDefined() const {
return hasDefinition();
}
+bool VarDecl::isNoDestroy(const ASTContext &Ctx) const {
+ return hasGlobalStorage() && (hasAttr<NoDestroyAttr>() ||
+ (!Ctx.getLangOpts().RegisterStaticDestructors &&
+ !hasAttr<AlwaysDestroyAttr>()));
+}
+
MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
if (isStaticDataMember())
// FIXME: Remove ?
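Illustrative use of the attributes the new isNoDestroy() predicate consults, assuming the clang::no_destroy / clang::always_destroy spellings and the -fno-c++-static-destructors driver flag:

    struct Logger { ~Logger(); };

    [[clang::no_destroy]] Logger persistent;   // exit-time destructor never emitted

    // Under -fno-c++-static-destructors every static-duration variable behaves
    // as if no_destroy were applied; always_destroy opts one back in.
    [[clang::always_destroy]] Logger flushed;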
@@ -2531,7 +2565,7 @@ SourceRange ParmVarDecl::getSourceRange() const {
// DeclaratorDecl considers the range of postfix types as overlapping with the
// declaration name, but this is not the case with parameters in ObjC methods.
if (isa<ObjCMethodDecl>(getDeclContext()))
- return SourceRange(DeclaratorDecl::getLocStart(), getLocation());
+ return SourceRange(DeclaratorDecl::getBeginLoc(), getLocation());
return DeclaratorDecl::getSourceRange();
}
@@ -2542,7 +2576,7 @@ Expr *ParmVarDecl::getDefaultArg() {
"Default argument is not yet instantiated!");
Expr *Arg = getInit();
- if (auto *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ if (auto *E = dyn_cast_or_null<FullExpr>(Arg))
return E->getSubExpr();
return Arg;
@@ -2609,6 +2643,41 @@ unsigned ParmVarDecl::getParameterIndexLarge() const {
// FunctionDecl Implementation
//===----------------------------------------------------------------------===//
+FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, StorageClass S,
+ bool isInlineSpecified, bool isConstexprSpecified)
+ : DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo,
+ StartLoc),
+ DeclContext(DK), redeclarable_base(C), ODRHash(0),
+ EndRangeLoc(NameInfo.getEndLoc()), DNLoc(NameInfo.getInfo()) {
+ assert(T.isNull() || T->isFunctionType());
+ FunctionDeclBits.SClass = S;
+ FunctionDeclBits.IsInline = isInlineSpecified;
+ FunctionDeclBits.IsInlineSpecified = isInlineSpecified;
+ FunctionDeclBits.IsExplicitSpecified = false;
+ FunctionDeclBits.IsVirtualAsWritten = false;
+ FunctionDeclBits.IsPure = false;
+ FunctionDeclBits.HasInheritedPrototype = false;
+ FunctionDeclBits.HasWrittenPrototype = true;
+ FunctionDeclBits.IsDeleted = false;
+ FunctionDeclBits.IsTrivial = false;
+ FunctionDeclBits.IsTrivialForCall = false;
+ FunctionDeclBits.IsDefaulted = false;
+ FunctionDeclBits.IsExplicitlyDefaulted = false;
+ FunctionDeclBits.HasImplicitReturnZero = false;
+ FunctionDeclBits.IsLateTemplateParsed = false;
+ FunctionDeclBits.IsConstexpr = isConstexprSpecified;
+ FunctionDeclBits.InstantiationIsPending = false;
+ FunctionDeclBits.UsesSEHTry = false;
+ FunctionDeclBits.HasSkippedBody = false;
+ FunctionDeclBits.WillHaveBody = false;
+ FunctionDeclBits.IsMultiVersion = false;
+ FunctionDeclBits.IsCopyDeductionCandidate = false;
+ FunctionDeclBits.HasODRHash = false;
+}
+
void FunctionDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
@@ -2672,11 +2741,11 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
void FunctionDecl::setBody(Stmt *B) {
Body = B;
if (B)
- EndRangeLoc = B->getLocEnd();
+ EndRangeLoc = B->getEndLoc();
}
void FunctionDecl::setPure(bool P) {
- IsPure = P;
+ FunctionDeclBits.IsPure = P;
if (P)
if (auto *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
Parent->markedVirtualFunctionPure();
@@ -2873,6 +2942,17 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+
+MultiVersionKind FunctionDecl::getMultiVersionKind() const {
+ if (hasAttr<TargetAttr>())
+ return MultiVersionKind::Target;
+ if (hasAttr<CPUDispatchAttr>())
+ return MultiVersionKind::CPUDispatch;
+ if (hasAttr<CPUSpecificAttr>())
+ return MultiVersionKind::CPUSpecific;
+ return MultiVersionKind::None;
+}
+
bool FunctionDecl::isCPUDispatchMultiVersion() const {
return isMultiVersion() && hasAttr<CPUDispatchAttr>();
}
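For reference (not from the patch), a sketch of the attribute spellings that the new getMultiVersionKind() distinguishes:

    // MultiVersionKind::Target: one body per target feature set.
    __attribute__((target("default"))) int widen();
    __attribute__((target("avx2")))    int widen();

    // MultiVersionKind::CPUDispatch / CPUSpecific use
    // __attribute__((cpu_dispatch(...))) and __attribute__((cpu_specific(...)))
    // in the same way, keyed on CPU names rather than feature strings.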
@@ -2881,6 +2961,10 @@ bool FunctionDecl::isCPUSpecificMultiVersion() const {
return isMultiVersion() && hasAttr<CPUSpecificAttr>();
}
+bool FunctionDecl::isTargetMultiVersion() const {
+ return isMultiVersion() && hasAttr<TargetAttr>();
+}
+
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);
@@ -2892,8 +2976,8 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
FunTmpl->setPreviousDecl(PrevFunTmpl);
}
- if (PrevDecl && PrevDecl->IsInline)
- IsInline = true;
+ if (PrevDecl && PrevDecl->isInlined())
+ setImplicitlyInline(true);
}
FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
@@ -3127,7 +3211,7 @@ SourceRange FunctionDecl::getReturnTypeSourceRange() const {
// Skip self-referential return types.
const SourceManager &SM = getASTContext().getSourceManager();
SourceRange RTRange = FTL.getReturnLoc().getSourceRange();
- SourceLocation Boundary = getNameInfo().getLocStart();
+ SourceLocation Boundary = getNameInfo().getBeginLoc();
if (RTRange.isInvalid() || Boundary.isInvalid() ||
!SM.isBeforeInTranslationUnit(RTRange.getEnd(), Boundary))
return SourceRange();
@@ -3147,20 +3231,6 @@ SourceRange FunctionDecl::getExceptionSpecSourceRange() const {
return FTL.getExceptionSpecRange();
}
-const Attr *FunctionDecl::getUnusedResultAttr() const {
- QualType RetType = getReturnType();
- if (const auto *Ret = RetType->getAsRecordDecl()) {
- if (const auto *R = Ret->getAttr<WarnUnusedResultAttr>())
- return R;
- } else if (const auto *ET = RetType->getAs<EnumType>()) {
- if (const EnumDecl *ED = ET->getDecl()) {
- if (const auto *R = ED->getAttr<WarnUnusedResultAttr>())
- return R;
- }
- }
- return getAttr<WarnUnusedResultAttr>();
-}
-
/// For an inline function definition in C, or for a gnu_inline function
/// in C++, determine whether the definition will be externally visible.
///
@@ -3664,23 +3734,23 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
}
unsigned FunctionDecl::getODRHash() const {
- assert(HasODRHash);
+ assert(hasODRHash());
return ODRHash;
}
unsigned FunctionDecl::getODRHash() {
- if (HasODRHash)
+ if (hasODRHash())
return ODRHash;
if (auto *FT = getInstantiatedFromMemberFunction()) {
- HasODRHash = true;
+ setHasODRHash(true);
ODRHash = FT->getODRHash();
return ODRHash;
}
class ODRHash Hash;
Hash.AddFunctionDecl(this);
- HasODRHash = true;
+ setHasODRHash(true);
ODRHash = Hash.CalculateHash();
return ODRHash;
}
@@ -3749,7 +3819,7 @@ SourceRange FieldDecl::getSourceRange() const {
if (!FinalExpr)
FinalExpr = getBitWidth();
if (FinalExpr)
- return SourceRange(getInnerLocStart(), FinalExpr->getLocEnd());
+ return SourceRange(getInnerLocStart(), FinalExpr->getEndLoc());
return DeclaratorDecl::getSourceRange();
}
@@ -3767,6 +3837,22 @@ void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
// TagDecl Implementation
//===----------------------------------------------------------------------===//
+TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id, TagDecl *PrevDecl,
+ SourceLocation StartL)
+ : TypeDecl(DK, DC, L, Id, StartL), DeclContext(DK), redeclarable_base(C),
+ TypedefNameDeclOrQualifier((TypedefNameDecl *)nullptr) {
+ assert((DK != Enum || TK == TTK_Enum) &&
+ "EnumDecl not matched with TTK_Enum");
+ setPreviousDecl(PrevDecl);
+ setTagKind(TK);
+ setCompleteDefinition(false);
+ setBeingDefined(false);
+ setEmbeddedInDeclarator(false);
+ setFreeStanding(false);
+ setCompleteDefinitionRequired(false);
+}
+
SourceLocation TagDecl::getOuterLocStart() const {
return getTemplateOrInnerLocStart(this);
}
@@ -3789,7 +3875,7 @@ void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
}
void TagDecl::startDefinition() {
- IsBeingDefined = true;
+ setBeingDefined(true);
if (auto *D = dyn_cast<CXXRecordDecl>(this)) {
struct CXXRecordDecl::DefinitionData *Data =
@@ -3804,8 +3890,8 @@ void TagDecl::completeDefinition() {
cast<CXXRecordDecl>(this)->hasDefinition()) &&
"definition completed but not started");
- IsCompleteDefinition = true;
- IsBeingDefined = false;
+ setCompleteDefinition(true);
+ setBeingDefined(false);
if (ASTMutationListener *L = getASTMutationListener())
L->CompletedTagDefinition(this);
@@ -3816,7 +3902,7 @@ TagDecl *TagDecl::getDefinition() const {
return const_cast<TagDecl *>(this);
// If it's possible for us to have an out-of-date definition, check now.
- if (MayHaveOutOfDateDef) {
+ if (mayHaveOutOfDateDef()) {
if (IdentifierInfo *II = getIdentifier()) {
if (II->isOutOfDate()) {
updateOutOfDate(*II);
@@ -3869,6 +3955,21 @@ void TagDecl::setTemplateParameterListsInfo(
// EnumDecl Implementation
//===----------------------------------------------------------------------===//
+EnumDecl::EnumDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id, EnumDecl *PrevDecl,
+ bool Scoped, bool ScopedUsingClassTag, bool Fixed)
+ : TagDecl(Enum, TTK_Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ assert(Scoped || !ScopedUsingClassTag);
+ IntegerType = nullptr;
+ setNumPositiveBits(0);
+ setNumNegativeBits(0);
+ setScoped(Scoped);
+ setScopedUsingClassTag(ScopedUsingClassTag);
+ setFixed(Fixed);
+ setHasODRHash(false);
+ ODRHash = 0;
+}
+
void EnumDecl::anchor() {}
EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3878,7 +3979,7 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
bool IsScopedUsingClassTag, bool IsFixed) {
auto *Enum = new (C, DC) EnumDecl(C, DC, StartLoc, IdLoc, Id, PrevDecl,
IsScoped, IsScopedUsingClassTag, IsFixed);
- Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
C.getTypeDeclType(Enum, PrevDecl);
return Enum;
}
@@ -3887,7 +3988,7 @@ EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
EnumDecl *Enum =
new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr, false, false, false);
- Enum->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ Enum->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
return Enum;
}
@@ -3971,12 +4072,12 @@ void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
}
unsigned EnumDecl::getODRHash() {
- if (HasODRHash)
+ if (hasODRHash())
return ODRHash;
class ODRHash Hash;
Hash.AddEnumDecl(this);
- HasODRHash = true;
+ setHasODRHash(true);
ODRHash = Hash.CalculateHash();
return ODRHash;
}
@@ -3989,14 +4090,18 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
RecordDecl *PrevDecl)
- : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc),
- HasFlexibleArrayMember(false), AnonymousStructOrUnion(false),
- HasObjectMember(false), HasVolatileMember(false),
- LoadedFieldsFromExternalStorage(false),
- NonTrivialToPrimitiveDefaultInitialize(false),
- NonTrivialToPrimitiveCopy(false), NonTrivialToPrimitiveDestroy(false),
- ParamDestroyedInCallee(false), ArgPassingRestrictions(APK_CanPassInRegs) {
- assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+ : TagDecl(DK, TK, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ assert(classof(static_cast<Decl *>(this)) && "Invalid Kind!");
+ setHasFlexibleArrayMember(false);
+ setAnonymousStructOrUnion(false);
+ setHasObjectMember(false);
+ setHasVolatileMember(false);
+ setHasLoadedFieldsFromExternalStorage(false);
+ setNonTrivialToPrimitiveDefaultInitialize(false);
+ setNonTrivialToPrimitiveCopy(false);
+ setNonTrivialToPrimitiveDestroy(false);
+ setParamDestroyedInCallee(false);
+ setArgPassingRestrictions(APK_CanPassInRegs);
}
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
@@ -4004,7 +4109,7 @@ RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
IdentifierInfo *Id, RecordDecl* PrevDecl) {
RecordDecl *R = new (C, DC) RecordDecl(Record, TK, C, DC,
StartLoc, IdLoc, Id, PrevDecl);
- R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
C.getTypeDeclType(R, PrevDecl);
return R;
@@ -4014,7 +4119,7 @@ RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
RecordDecl *R =
new (C, ID) RecordDecl(Record, TTK_Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr);
- R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
return R;
}
@@ -4038,7 +4143,7 @@ void RecordDecl::setCapturedRecord() {
}
RecordDecl::field_iterator RecordDecl::field_begin() const {
- if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+ if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage())
LoadFieldsFromExternalStorage();
return field_iterator(decl_iterator(FirstDecl));
@@ -4066,7 +4171,7 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource::Deserializing TheFields(Source);
SmallVector<Decl*, 64> Decls;
- LoadedFieldsFromExternalStorage = true;
+ setHasLoadedFieldsFromExternalStorage(true);
Source->FindExternalLexicalDecls(this, [](Decl::Kind K) {
return FieldDecl::classofKind(K) || IndirectFieldDecl::classofKind(K);
}, Decls);
@@ -4148,6 +4253,15 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
// BlockDecl Implementation
//===----------------------------------------------------------------------===//
+BlockDecl::BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
+ : Decl(Block, DC, CaretLoc), DeclContext(Block) {
+ setIsVariadic(false);
+ setCapturesCXXThis(false);
+ setBlockMissingReturnType(true);
+ setIsConversionFromLambda(false);
+ setDoesNotEscape(false);
+}
+
void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
assert(!ParamInfo && "Already has param info!");
@@ -4161,7 +4275,7 @@ void BlockDecl::setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
void BlockDecl::setCaptures(ASTContext &Context, ArrayRef<Capture> Captures,
bool CapturesCXXThis) {
- this->CapturesCXXThis = CapturesCXXThis;
+ this->setCapturesCXXThis(CapturesCXXThis);
this->NumCaptures = Captures.size();
if (Captures.empty()) {
@@ -4182,7 +4296,7 @@ bool BlockDecl::capturesVariable(const VarDecl *variable) const {
}
SourceRange BlockDecl::getSourceRange() const {
- return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+ return SourceRange(getLocation(), Body ? Body->getEndLoc() : getLocation());
}
//===----------------------------------------------------------------------===//
@@ -4315,7 +4429,7 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
FunctionDecl *New =
new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo,
SC, isInlineSpecified, isConstexprSpecified);
- New->HasWrittenPrototype = hasWrittenPrototype;
+ New->setHasWrittenPrototype(hasWrittenPrototype);
return New;
}
@@ -4398,7 +4512,7 @@ IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
SourceRange EnumConstantDecl::getSourceRange() const {
SourceLocation End = getLocation();
if (Init)
- End = Init->getLocEnd();
+ End = Init->getEndLoc();
return SourceRange(getLocation(), End);
}
@@ -4472,14 +4586,14 @@ SourceRange TypedefDecl::getSourceRange() const {
if (typeIsPostfix(TInfo->getType()))
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
}
- return SourceRange(getLocStart(), RangeEnd);
+ return SourceRange(getBeginLoc(), RangeEnd);
}
SourceRange TypeAliasDecl::getSourceRange() const {
- SourceLocation RangeEnd = getLocStart();
+ SourceLocation RangeEnd = getBeginLoc();
if (TypeSourceInfo *TInfo = getTypeSourceInfo())
RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
- return SourceRange(getLocStart(), RangeEnd);
+ return SourceRange(getBeginLoc(), RangeEnd);
}
void FileScopeAsmDecl::anchor() {}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 700ddd389ddd..b83082e9eb08 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -153,12 +153,12 @@ void Decl::setInvalidDecl(bool Invalid) {
}
const char *DeclContext::getDeclKindName() const {
- switch (DeclKind) {
- default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
+ switch (getDeclKind()) {
#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
}
+ llvm_unreachable("Declaration context not in DeclNodes.inc!");
}
bool Decl::StatisticsEnabled = false;
@@ -810,6 +810,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCCategoryImpl:
case Import:
case OMPThreadPrivate:
+ case OMPRequires:
case OMPCapturedExpr:
case Empty:
// Never looked up by name.
@@ -836,6 +837,29 @@ void Decl::dropAttrs() {
getASTContext().eraseDeclAttrs(this);
}
+void Decl::addAttr(Attr *A) {
+ if (!hasAttrs()) {
+ setAttrs(AttrVec(1, A));
+ return;
+ }
+
+ AttrVec &Attrs = getAttrs();
+ if (!A->isInherited()) {
+ Attrs.push_back(A);
+ return;
+ }
+
+ // Attribute inheritance is processed after attribute parsing. To keep the
+ // order as in the source code, add inherited attributes before non-inherited
+ // ones.
+ auto I = Attrs.begin(), E = Attrs.end();
+ for (; I != E; ++I) {
+ if (!(*I)->isInherited())
+ break;
+ }
+ Attrs.insert(I, A);
+}
+
const AttrVec &Decl::getAttrs() const {
assert(HasAttrs && "No attrs to get!");
return getASTContext().getDeclAttrs(this);
@@ -930,6 +954,10 @@ bool Decl::AccessDeclContextSanity() const {
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
+int64_t Decl::getID() const {
+ return getASTContext().getAllocator().identifyKnownAlignedObject<Decl>(this);
+}
+
const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
QualType Ty;
if (const auto *D = dyn_cast<ValueDecl>(this))
@@ -980,6 +1008,16 @@ Decl *DeclContext::getNonClosureAncestor() {
// DeclContext Implementation
//===----------------------------------------------------------------------===//
+DeclContext::DeclContext(Decl::Kind K) {
+ DeclContextBits.DeclKind = K;
+ setHasExternalLexicalStorage(false);
+ setHasExternalVisibleStorage(false);
+ setNeedToReconcileExternalVisibleStorage(false);
+ setHasLazyLocalLexicalLookups(false);
+ setHasLazyExternalLexicalLookups(false);
+ setUseQualifiedLookup(false);
+}
+
bool DeclContext::classof(const Decl *D) {
switch (D->getKind()) {
#define DECL(NAME, BASE)
@@ -1007,7 +1045,7 @@ DeclContext::~DeclContext() = default;
/// a friend function the parent lookup context is the lexical context, which
/// is the class in which the friend is declared.
DeclContext *DeclContext::getLookupParent() {
- // FIXME: Find a better way to identify friends
+ // FIXME: Find a better way to identify friends.
if (isa<FunctionDecl>(this))
if (getParent()->getRedeclContext()->isFileContext() &&
getLexicalParent()->getRedeclContext()->isRecord())
@@ -1070,9 +1108,9 @@ bool DeclContext::isDependentContext() const {
}
bool DeclContext::isTransparentContext() const {
- if (DeclKind == Decl::Enum)
+ if (getDeclKind() == Decl::Enum)
return !cast<EnumDecl>(this)->isScoped();
- else if (DeclKind == Decl::LinkageSpec || DeclKind == Decl::Export)
+ else if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export)
return true;
return false;
@@ -1118,7 +1156,7 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
}
DeclContext *DeclContext::getPrimaryContext() {
- switch (DeclKind) {
+ switch (getDeclKind()) {
case Decl::TranslationUnit:
case Decl::ExternCContext:
case Decl::LinkageSpec:
@@ -1154,7 +1192,7 @@ DeclContext *DeclContext::getPrimaryContext() {
return this;
default:
- if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
+ if (getDeclKind() >= Decl::firstTag && getDeclKind() <= Decl::lastTag) {
// If this is a tag type that has a definition or is currently
// being defined, that definition is our primary context.
auto *Tag = cast<TagDecl>(this);
@@ -1174,7 +1212,8 @@ DeclContext *DeclContext::getPrimaryContext() {
return Tag;
}
- assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
+ assert(getDeclKind() >= Decl::firstFunction &&
+ getDeclKind() <= Decl::lastFunction &&
"Unknown DeclContext kind");
return this;
}
@@ -1184,7 +1223,7 @@ void
DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
Contexts.clear();
- if (DeclKind != Decl::Namespace) {
+ if (getDeclKind() != Decl::Namespace) {
Contexts.push_back(this);
return;
}
@@ -1222,8 +1261,8 @@ DeclContext::BuildDeclChain(ArrayRef<Decl *> Decls,
/// built a lookup map. For every name in the map, pull in the new names from
/// the external storage.
void DeclContext::reconcileExternalVisibleStorage() const {
- assert(NeedToReconcileExternalVisibleStorage && LookupPtr);
- NeedToReconcileExternalVisibleStorage = false;
+ assert(hasNeedToReconcileExternalVisibleStorage() && LookupPtr);
+ setNeedToReconcileExternalVisibleStorage(false);
for (auto &Lookup : *LookupPtr)
Lookup.second.setHasExternalDecls();
@@ -1242,7 +1281,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
// Load the external declarations, if any.
SmallVector<Decl*, 64> Decls;
- ExternalLexicalStorage = false;
+ setHasExternalLexicalStorage(false);
Source->FindExternalLexicalDecls(this, Decls);
if (Decls.empty())
@@ -1252,7 +1291,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
// we need to ignore them.
bool FieldsAlreadyLoaded = false;
if (const auto *RD = dyn_cast<RecordDecl>(this))
- FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
+ FieldsAlreadyLoaded = RD->hasLoadedFieldsFromExternalStorage();
// Splice the newly-read declarations into the beginning of the list
// of declarations.
@@ -1273,7 +1312,7 @@ ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
StoredDeclsMap *Map;
if (!(Map = DC->LookupPtr))
Map = DC->CreateStoredDeclsMap(Context);
- if (DC->NeedToReconcileExternalVisibleStorage)
+ if (DC->hasNeedToReconcileExternalVisibleStorage())
DC->reconcileExternalVisibleStorage();
(*Map)[Name].removeExternalDecls();
@@ -1289,7 +1328,7 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
StoredDeclsMap *Map;
if (!(Map = DC->LookupPtr))
Map = DC->CreateStoredDeclsMap(Context);
- if (DC->NeedToReconcileExternalVisibleStorage)
+ if (DC->hasNeedToReconcileExternalVisibleStorage())
DC->reconcileExternalVisibleStorage();
StoredDeclsList &List = (*Map)[Name];
@@ -1366,6 +1405,12 @@ static bool shouldBeHidden(NamedDecl *D) {
D->isTemplateParameter())
return true;
+ // Skip friends and local extern declarations unless they're the first
+ // declaration of the entity.
+ if ((D->isLocalExternDecl() || D->getFriendObjectKind()) &&
+ D != D->getCanonicalDecl())
+ return true;
+
// Skip template specializations.
// FIXME: This feels like a hack. Should DeclarationName support
// template-ids, or is there a better way to keep specializations
@@ -1424,7 +1469,9 @@ void DeclContext::removeDecl(Decl *D) {
if (Map) {
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
- if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ // Remove the decl only if it is contained.
+ StoredDeclsList::DeclsTy *Vec = Pos->second.getAsVector();
+ if ((Vec && is_contained(*Vec, ND)) || Pos->second.getAsDecl() == ND)
Pos->second.remove(ND);
}
} while (DC->isTransparentContext() && (DC = DC->getParent()));
@@ -1483,21 +1530,24 @@ void DeclContext::addDeclInternal(Decl *D) {
StoredDeclsMap *DeclContext::buildLookup() {
assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
- if (!HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups)
+ if (!hasLazyLocalLexicalLookups() &&
+ !hasLazyExternalLexicalLookups())
return LookupPtr;
SmallVector<DeclContext *, 2> Contexts;
collectAllContexts(Contexts);
- if (HasLazyExternalLexicalLookups) {
- HasLazyExternalLexicalLookups = false;
+ if (hasLazyExternalLexicalLookups()) {
+ setHasLazyExternalLexicalLookups(false);
for (auto *DC : Contexts) {
- if (DC->hasExternalLexicalStorage())
- HasLazyLocalLexicalLookups |=
- DC->LoadLexicalDeclsFromExternalStorage();
+ if (DC->hasExternalLexicalStorage()) {
+ bool LoadedDecls = DC->LoadLexicalDeclsFromExternalStorage();
+ setHasLazyLocalLexicalLookups(
+ hasLazyLocalLexicalLookups() | LoadedDecls);
+ }
}
- if (!HasLazyLocalLexicalLookups)
+ if (!hasLazyLocalLexicalLookups())
return LookupPtr;
}
@@ -1505,7 +1555,7 @@ StoredDeclsMap *DeclContext::buildLookup() {
buildLookupImpl(DC, hasExternalVisibleStorage());
// We no longer have any lazy decls.
- HasLazyLocalLexicalLookups = false;
+ setHasLazyLocalLexicalLookups(false);
return LookupPtr;
}
@@ -1543,7 +1593,8 @@ NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
- assert(DeclKind != Decl::LinkageSpec && DeclKind != Decl::Export &&
+ assert(getDeclKind() != Decl::LinkageSpec &&
+ getDeclKind() != Decl::Export &&
"should not perform lookups into transparent contexts");
const DeclContext *PrimaryContext = getPrimaryContext();
@@ -1560,12 +1611,13 @@ DeclContext::lookup(DeclarationName Name) const {
if (hasExternalVisibleStorage()) {
assert(Source && "external visible storage but no external source?");
- if (NeedToReconcileExternalVisibleStorage)
+ if (hasNeedToReconcileExternalVisibleStorage())
reconcileExternalVisibleStorage();
StoredDeclsMap *Map = LookupPtr;
- if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+ if (hasLazyLocalLexicalLookups() ||
+ hasLazyExternalLexicalLookups())
// FIXME: Make buildLookup const?
Map = const_cast<DeclContext*>(this)->buildLookup();
@@ -1590,7 +1642,8 @@ DeclContext::lookup(DeclarationName Name) const {
}
StoredDeclsMap *Map = LookupPtr;
- if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+ if (hasLazyLocalLexicalLookups() ||
+ hasLazyExternalLexicalLookups())
Map = const_cast<DeclContext*>(this)->buildLookup();
if (!Map)
@@ -1605,7 +1658,8 @@ DeclContext::lookup(DeclarationName Name) const {
DeclContext::lookup_result
DeclContext::noload_lookup(DeclarationName Name) {
- assert(DeclKind != Decl::LinkageSpec && DeclKind != Decl::Export &&
+ assert(getDeclKind() != Decl::LinkageSpec &&
+ getDeclKind() != Decl::Export &&
"should not perform lookups into transparent contexts");
DeclContext *PrimaryContext = getPrimaryContext();
@@ -1626,12 +1680,12 @@ DeclContext::noload_lookup(DeclarationName Name) {
// now. Don't import any external declarations, not even if we know we have
// some missing from the external visible lookups.
void DeclContext::loadLazyLocalLexicalLookups() {
- if (HasLazyLocalLexicalLookups) {
+ if (hasLazyLocalLexicalLookups()) {
SmallVector<DeclContext *, 2> Contexts;
collectAllContexts(Contexts);
for (auto *Context : Contexts)
buildLookupImpl(Context, hasExternalVisibleStorage());
- HasLazyLocalLexicalLookups = false;
+ setHasLazyLocalLexicalLookups(false);
}
}
@@ -1649,7 +1703,8 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
// If we have a lookup table, check there first. Maybe we'll get lucky.
// FIXME: Should we be checking these flags on the primary context?
- if (Name && !HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups) {
+ if (Name && !hasLazyLocalLexicalLookups() &&
+ !hasLazyExternalLexicalLookups()) {
if (StoredDeclsMap *Map = LookupPtr) {
StoredDeclsMap::iterator Pos = Map->find(Name);
if (Pos != Map->end()) {
@@ -1674,8 +1729,18 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
DeclContext *DeclContext::getRedeclContext() {
DeclContext *Ctx = this;
- // Skip through transparent contexts.
- while (Ctx->isTransparentContext())
+
+ // In C, a record type is the redeclaration context for its fields only. If
+ // we arrive at a record context after skipping anything else, we should skip
+ // the record as well. Currently, this means skipping enumerations because
+ // they're the only transparent context that can exist within a struct or
+ // union.
+ bool SkipRecords = getDeclKind() == Decl::Kind::Enum &&
+ !getParentASTContext().getLangOpts().CPlusPlus;
+
+ // Skip through contexts to get to the redeclaration context. Transparent
+ // contexts are always skipped.
+ while ((SkipRecords && Ctx->isRecord()) || Ctx->isTransparentContext())
Ctx = Ctx->getParent();
return Ctx;
}
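A small sketch of the C-only behaviour the comment above describes; the C form appears in comments so the snippet itself stays valid C++:

    struct Box {
      enum Color { Red, Green } c;
    };
    // In C, 'Color' and its enumerators are injected into the enclosing scope,
    // so the redeclaration context of the enum skips past Box:
    //   enum Color shade = Green;     /* valid C, ill-formed C++ */
    // In C++ they remain members and need qualification:
    Box::Color shade = Box::Green;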
@@ -1758,7 +1823,7 @@ void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
buildLookup();
makeDeclVisibleInContextImpl(D, Internal);
} else {
- HasLazyLocalLexicalLookups = true;
+ setHasLazyLocalLexicalLookups(true);
}
// If we are a transparent context or inline namespace, insert into our
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 51e8e14f804a..31ffeb0dcd1e 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -128,7 +128,7 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
bool DelayTypeCreation) {
auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC, StartLoc, IdLoc, Id,
PrevDecl);
- R->MayHaveOutOfDateDef = C.getLangOpts().Modules;
+ R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
// FIXME: DelayTypeCreation seems like such a hack
if (!DelayTypeCreation)
@@ -143,11 +143,11 @@ CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
LambdaCaptureDefault CaptureDefault) {
auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
nullptr, nullptr);
- R->IsBeingDefined = true;
+ R->setBeingDefined(true);
R->DefinitionData =
new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
CaptureDefault);
- R->MayHaveOutOfDateDef = false;
+ R->setMayHaveOutOfDateDef(false);
R->setImplicit(true);
C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
@@ -158,7 +158,7 @@ CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
auto *R = new (C, ID) CXXRecordDecl(
CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
- R->MayHaveOutOfDateDef = false;
+ R->setMayHaveOutOfDateDef(false);
return R;
}
@@ -628,6 +628,24 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType(
return false;
}
+bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const {
+ assert(isLambda() && "not a lambda");
+
+ // C++2a [expr.prim.lambda.capture]p11:
+ // The closure type associated with a lambda-expression has no default
+ // constructor if the lambda-expression has a lambda-capture and a
+ // defaulted default constructor otherwise. It has a deleted copy
+ // assignment operator if the lambda-expression has a lambda-capture and
+ // defaulted copy and move assignment operators otherwise.
+ //
+ // C++17 [expr.prim.lambda]p21:
+ // The closure type associated with a lambda-expression has no default
+ // constructor and a deleted copy assignment operator.
+ if (getLambdaCaptureDefault() != LCD_None)
+ return false;
+ return getASTContext().getLangOpts().CPlusPlus2a;
+}
+
void CXXRecordDecl::addedMember(Decl *D) {
if (!D->isImplicit() &&
!isa<FieldDecl>(D) &&
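Illustration (not from the patch) of the rule the new lambdaIsDefaultConstructibleAndAssignable() encodes: only capture-less lambdas gain the defaulted special members, and only in C++2a mode.

    void demo() {
      auto make = [] { return 42; };   // no lambda-capture
      decltype(make) other;            // OK in C++2a: defaulted default constructor
      other = make;                    // OK in C++2a: defaulted copy assignment

      int n = 0;
      auto cap = [n] { return n; };
      // decltype(cap) bad;            // ill-formed: the closure type of a capturing
                                       // lambda has no default constructor
    }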
@@ -731,9 +749,14 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
// C++11 [dcl.init.aggr]p1: DR1518
- // An aggregate is an array or a class with no user-provided, explicit, or
- // inherited constructors
- if (Constructor->isUserProvided() || Constructor->isExplicit())
+ // An aggregate is an array or a class with no user-provided [or]
+ // explicit [...] constructors
+ // C++20 [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-declared [...]
+ // constructors
+ if (getASTContext().getLangOpts().CPlusPlus2a
+ ? !Constructor->isImplicit()
+ : (Constructor->isUserProvided() || Constructor->isExplicit()))
data().Aggregate = false;
}
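A short illustration (not part of the patch) of the C++17 vs C++2a difference handled in the hunk above:

    struct S {
      S() = default;   // user-declared, but neither user-provided nor explicit
      int x;
    };

    void demo() {
      S s{1};   // C++17 (DR1518): S is still an aggregate, so this is aggregate init.
                // C++2a: any user-declared constructor disqualifies S from being an
                // aggregate, and no S(int) exists, so this is ill-formed.
    }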
@@ -1327,6 +1350,15 @@ bool CXXRecordDecl::isGenericLambda() const {
return getLambdaData().IsGenericLambda;
}
+#ifndef NDEBUG
+static bool allLookupResultsAreTheSame(const DeclContext::lookup_result &R) {
+ for (auto *D : R)
+ if (!declaresSameEntity(D, R.front()))
+ return false;
+ return true;
+}
+#endif
+
CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
if (!isLambda()) return nullptr;
DeclarationName Name =
@@ -1334,7 +1366,8 @@ CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
DeclContext::lookup_result Calls = lookup(Name);
assert(!Calls.empty() && "Missing lambda call operator!");
- assert(Calls.size() == 1 && "More than one lambda call operator!");
+ assert(allLookupResultsAreTheSame(Calls) &&
+ "More than one lambda call operator!");
NamedDecl *CallOp = Calls.front();
if (const auto *CallOpTmpl = dyn_cast<FunctionTemplateDecl>(CallOp))
@@ -1349,7 +1382,8 @@ CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
&getASTContext().Idents.get(getLambdaStaticInvokerName());
DeclContext::lookup_result Invoker = lookup(Name);
if (Invoker.empty()) return nullptr;
- assert(Invoker.size() == 1 && "More than one static invoker operator!");
+ assert(allLookupResultsAreTheSame(Invoker) &&
+ "More than one static invoker operator!");
NamedDecl *InvokerFun = Invoker.front();
if (const auto *InvokerTemplate = dyn_cast<FunctionTemplateDecl>(InvokerFun))
return cast<CXXMethodDecl>(InvokerTemplate->getTemplatedDecl());
@@ -1994,7 +2028,9 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
return nullptr;
}
-bool CXXMethodDecl::isUsualDeallocationFunction() const {
+bool CXXMethodDecl::isUsualDeallocationFunction(
+ SmallVectorImpl<const FunctionDecl *> &PreventedBy) const {
+ assert(PreventedBy.empty() && "PreventedBy is expected to be empty");
if (getOverloadedOperator() != OO_Delete &&
getOverloadedOperator() != OO_Array_Delete)
return false;
@@ -2052,14 +2088,16 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
// This function is a usual deallocation function if there are no
// single-parameter deallocation functions of the same kind.
DeclContext::lookup_result R = getDeclContext()->lookup(getDeclName());
- for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
- I != E; ++I) {
- if (const auto *FD = dyn_cast<FunctionDecl>(*I))
- if (FD->getNumParams() == 1)
- return false;
+ bool Result = true;
+ for (const auto *D : R) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getNumParams() == 1) {
+ PreventedBy.push_back(FD);
+ Result = false;
+ }
+ }
}
-
- return true;
+ return Result;
}
bool CXXMethodDecl::isCopyAssignmentOperator() const {
@@ -2135,19 +2173,24 @@ CXXMethodDecl::overridden_methods() const {
return getASTContext().overridden_methods(this);
}
-QualType CXXMethodDecl::getThisType(ASTContext &C) const {
+QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
+ const CXXRecordDecl *Decl) {
+ ASTContext &C = Decl->getASTContext();
+ QualType ClassTy = C.getTypeDeclType(Decl);
+ ClassTy = C.getQualifiedType(ClassTy, FPT->getTypeQuals());
+ return C.getPointerType(ClassTy);
+}
+
+QualType CXXMethodDecl::getThisType() const {
// C++ 9.3.2p1: The type of this in a member function of a class X is X*.
// If the member function is declared const, the type of this is const X*,
// if the member function is declared volatile, the type of this is
// volatile X*, and if the member function is declared const volatile,
// the type of this is const volatile X*.
-
assert(isInstance() && "No 'this' for static methods!");
- QualType ClassTy = C.getTypeDeclType(getParent());
- ClassTy = C.getQualifiedType(ClassTy,
- Qualifiers::fromCVRUMask(getTypeQualifiers()));
- return C.getPointerType(ClassTy);
+ return CXXMethodDecl::getThisType(getType()->getAs<FunctionProtoType>(),
+ getParent());
}
bool CXXMethodDecl::hasInlineBody() const {
@@ -2208,6 +2251,11 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
: Initializee(TInfo), Init(Init), LParenLoc(L), RParenLoc(R),
IsDelegating(true), IsVirtual(false), IsWritten(false), SourceOrder(0) {}
+int64_t CXXCtorInitializer::getID(const ASTContext &Context) const {
+ return Context.getAllocator()
+ .identifyKnownAlignedObject<CXXCtorInitializer>(this);
+}
+
TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
if (isBaseInitializer())
return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
@@ -2246,6 +2294,21 @@ SourceRange CXXCtorInitializer::getSourceRange() const {
return SourceRange(getSourceLocation(), getRParenLoc());
}
+CXXConstructorDecl::CXXConstructorDecl(
+ ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ bool isExplicitSpecified, bool isInline, bool isImplicitlyDeclared,
+ bool isConstexpr, InheritedConstructor Inherited)
+ : CXXMethodDecl(CXXConstructor, C, RD, StartLoc, NameInfo, T, TInfo,
+ SC_None, isInline, isConstexpr, SourceLocation()) {
+ setNumCtorInitializers(0);
+ setInheritingConstructor(static_cast<bool>(Inherited));
+ setImplicit(isImplicitlyDeclared);
+ if (Inherited)
+ *getTrailingObjects<InheritedConstructor>() = Inherited;
+ setExplicitSpecified(isExplicitSpecified);
+}
+
void CXXConstructorDecl::anchor() {}
CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
@@ -2255,7 +2318,7 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
auto *Result = new (C, ID, Extra) CXXConstructorDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
false, false, false, false, InheritedConstructor());
- Result->IsInheritingConstructor = Inherited;
+ Result->setInheritingConstructor(Inherited);
return Result;
}
@@ -2451,6 +2514,15 @@ bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
getConversionType()->isBlockPointerType();
}
+LinkageSpecDecl::LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc,
+ SourceLocation LangLoc, LanguageIDs lang,
+ bool HasBraces)
+ : Decl(LinkageSpec, DC, LangLoc), DeclContext(LinkageSpec),
+ ExternLoc(ExternLoc), RBraceLoc(SourceLocation()) {
+ setLanguage(lang);
+ LinkageSpecDeclBits.HasBraces = HasBraces;
+}
+
void LinkageSpecDecl::anchor() {}
LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
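For reference, the two source forms that the HasBraces flag of the new constructor distinguishes (illustrative only):

    extern "C" void lone_fn(int);   // HasBraces == false

    extern "C" {                    // HasBraces == true; RBraceLoc records the '}'
      void grouped_a();
      void grouped_b();
    }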
@@ -2838,6 +2910,8 @@ void DecompositionDecl::printName(llvm::raw_ostream &os) const {
os << ']';
}
+void MSPropertyDecl::anchor() {}
+
MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName N,
QualType T, TypeSourceInfo *TInfo,
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 01fd10429fc1..1ed7fc71b025 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -65,6 +65,13 @@ void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
+ObjCContainerDecl::ObjCContainerDecl(Kind DK, DeclContext *DC,
+ IdentifierInfo *Id, SourceLocation nameLoc,
+ SourceLocation atStartLoc)
+ : NamedDecl(DK, DC, nameLoc, Id), DeclContext(DK) {
+ setAtStartLoc(atStartLoc);
+}
+
void ObjCContainerDecl::anchor() {}
/// getIvarDecl - This method looks up an ivar in this ContextDecl.
@@ -350,7 +357,7 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::getSuperClass() const {
SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const {
if (TypeSourceInfo *superTInfo = getSuperClassTInfo())
- return superTInfo->getTypeLoc().getLocStart();
+ return superTInfo->getTypeLoc().getBeginLoc();
return SourceLocation();
}
@@ -769,6 +776,37 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
// ObjCMethodDecl
//===----------------------------------------------------------------------===//
+ObjCMethodDecl::ObjCMethodDecl(SourceLocation beginLoc, SourceLocation endLoc,
+ Selector SelInfo, QualType T,
+ TypeSourceInfo *ReturnTInfo,
+ DeclContext *contextDecl, bool isInstance,
+ bool isVariadic, bool isPropertyAccessor,
+ bool isImplicitlyDeclared, bool isDefined,
+ ImplementationControl impControl,
+ bool HasRelatedResultType)
+ : NamedDecl(ObjCMethod, contextDecl, beginLoc, SelInfo),
+ DeclContext(ObjCMethod), MethodDeclType(T), ReturnTInfo(ReturnTInfo),
+ DeclEndLoc(endLoc) {
+
+ // Initialize the bits stored in DeclContext.
+ ObjCMethodDeclBits.Family =
+ static_cast<ObjCMethodFamily>(InvalidObjCMethodFamily);
+ setInstanceMethod(isInstance);
+ setVariadic(isVariadic);
+ setPropertyAccessor(isPropertyAccessor);
+ setDefined(isDefined);
+ setIsRedeclaration(false);
+ setHasRedeclaration(false);
+ setDeclImplementation(impControl);
+ setObjCDeclQualifier(OBJC_TQ_None);
+ setRelatedResultType(HasRelatedResultType);
+ setSelLocsKind(SelLoc_StandardNoSpace);
+ setOverriding(false);
+ setHasSkippedBody(false);
+
+ setImplicit(isImplicitlyDeclared);
+}
+
ObjCMethodDecl *ObjCMethodDecl::Create(
ASTContext &C, SourceLocation beginLoc, SourceLocation endLoc,
Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
@@ -791,6 +829,14 @@ bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
hasAttr<ObjCDesignatedInitializerAttr>();
}
+bool ObjCMethodDecl::definedInNSObject(const ASTContext &Ctx) const {
+ if (const auto *PD = dyn_cast<const ObjCProtocolDecl>(getDeclContext()))
+ return PD->getIdentifier() == Ctx.getNSObjectName();
+ if (const auto *ID = dyn_cast<const ObjCInterfaceDecl>(getDeclContext()))
+ return ID->getIdentifier() == Ctx.getNSObjectName();
+ return false;
+}
+
bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
const ObjCMethodDecl **InitMethod) const {
if (getMethodFamily() != OMF_init)
@@ -810,8 +856,8 @@ Stmt *ObjCMethodDecl::getBody() const {
void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
assert(PrevMethod);
getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
- IsRedeclaration = true;
- PrevMethod->HasRedeclaration = true;
+ setIsRedeclaration(true);
+ PrevMethod->setHasRedeclaration(true);
}
void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
@@ -846,9 +892,9 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C,
if (isImplicit())
return setParamsAndSelLocs(C, Params, llvm::None);
- SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params,
- DeclEndLoc);
- if (SelLocsKind != SelLoc_NonStandard)
+ setSelLocsKind(hasStandardSelectorLocs(getSelector(), SelLocs, Params,
+ DeclEndLoc));
+ if (getSelLocsKind() != SelLoc_NonStandard)
return setParamsAndSelLocs(C, Params, llvm::None);
setParamsAndSelLocs(C, Params, SelLocs);
@@ -860,7 +906,7 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C,
ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
ASTContext &Ctx = getASTContext();
ObjCMethodDecl *Redecl = nullptr;
- if (HasRedeclaration)
+ if (hasRedeclaration())
Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this));
if (Redecl)
return Redecl;
@@ -931,14 +977,14 @@ ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
return this;
}
-SourceLocation ObjCMethodDecl::getLocEnd() const {
+SourceLocation ObjCMethodDecl::getEndLoc() const {
if (Stmt *Body = getBody())
- return Body->getLocEnd();
+ return Body->getEndLoc();
return DeclEndLoc;
}
ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
- auto family = static_cast<ObjCMethodFamily>(Family);
+ auto family = static_cast<ObjCMethodFamily>(ObjCMethodDeclBits.Family);
if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
return family;
@@ -954,7 +1000,7 @@ ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
}
- Family = static_cast<unsigned>(family);
+ ObjCMethodDeclBits.Family = family;
return family;
}
@@ -1025,7 +1071,7 @@ ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
}
// Cache the result.
- Family = static_cast<unsigned>(family);
+ ObjCMethodDeclBits.Family = family;
return family;
}
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index f5c3599ef6c6..b77a67cbf38d 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -54,9 +54,49 @@ void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
}
//===----------------------------------------------------------------------===//
+// OMPRequiresDecl Implementation.
+//===----------------------------------------------------------------------===//
+
+void OMPRequiresDecl::anchor() {}
+
+OMPRequiresDecl *OMPRequiresDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ ArrayRef<OMPClause *> CL) {
+ OMPRequiresDecl *D =
+ new (C, DC, additionalSizeToAlloc<OMPClause *>(CL.size()))
+ OMPRequiresDecl(OMPRequires, DC, L);
+ D->NumClauses = CL.size();
+ D->setClauses(CL);
+ return D;
+}
+
+OMPRequiresDecl *OMPRequiresDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned N) {
+ OMPRequiresDecl *D = new (C, ID, additionalSizeToAlloc<OMPClause *>(N))
+ OMPRequiresDecl(OMPRequires, nullptr, SourceLocation());
+ D->NumClauses = N;
+ return D;
+}
+
+void OMPRequiresDecl::setClauses(ArrayRef<OMPClause *> CL) {
+ assert(CL.size() == NumClauses &&
+ "Number of clauses is not the same as the preallocated buffer");
+ std::uninitialized_copy(CL.begin(), CL.end(),
+ getTrailingObjects<OMPClause *>());
+}
+
+//===----------------------------------------------------------------------===//
// OMPDeclareReductionDecl Implementation.
//===----------------------------------------------------------------------===//
+OMPDeclareReductionDecl::OMPDeclareReductionDecl(
+ Kind DK, DeclContext *DC, SourceLocation L, DeclarationName Name,
+ QualType Ty, OMPDeclareReductionDecl *PrevDeclInScope)
+ : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), Combiner(nullptr),
+ PrevDeclInScope(PrevDeclInScope) {
+ setInitializer(nullptr, CallInit);
+}
+
void OMPDeclareReductionDecl::anchor() {}
OMPDeclareReductionDecl *OMPDeclareReductionDecl::Create(
@@ -104,5 +144,5 @@ OMPCapturedExprDecl *OMPCapturedExprDecl::CreateDeserialized(ASTContext &C,
SourceRange OMPCapturedExprDecl::getSourceRange() const {
assert(hasInit());
- return SourceRange(getInit()->getLocStart(), getInit()->getLocEnd());
+ return SourceRange(getInit()->getBeginLoc(), getInit()->getEndLoc());
}
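
OMPRequiresDecl::Create above co-allocates the clause array with the declaration node through llvm::TrailingObjects (additionalSizeToAlloc / getTrailingObjects / setClauses). The following standalone sketch shows the same single-allocation layout using only the standard library; Clause and RequiresNode are illustrative stand-ins, not the Clang classes.

#include <cassert>
#include <cstddef>
#include <cstring>
#include <new>
#include <vector>

// Illustrative stand-ins only; not Clang's OMPClause/OMPRequiresDecl.
struct Clause { const char *Spelling; };

class RequiresNode {
  std::size_t NumClauses; // a full word keeps the trailing array pointer-aligned

  explicit RequiresNode(std::size_t N) : NumClauses(N) {}

  Clause **getTrailingClauses() {
    // The clause array lives immediately after the node in the same block.
    return reinterpret_cast<Clause **>(this + 1);
  }

public:
  static RequiresNode *Create(const std::vector<Clause *> &CL) {
    // One allocation: the node itself plus room for the trailing array.
    void *Mem =
        ::operator new(sizeof(RequiresNode) + CL.size() * sizeof(Clause *));
    auto *Node = new (Mem) RequiresNode(CL.size());
    std::memcpy(Node->getTrailingClauses(), CL.data(),
                CL.size() * sizeof(Clause *));
    return Node;
  }

  std::size_t size() const { return NumClauses; }
  Clause *clause(std::size_t I) {
    assert(I < NumClauses && "clause index out of range");
    return getTrailingClauses()[I];
  }
};

int main() {
  Clause A{"unified_shared_memory"}, B{"reverse_offload"};
  RequiresNode *N = RequiresNode::Create({&A, &B});
  assert(N->size() == 2 && N->clause(1) == &B);
  ::operator delete(N);
  return 0;
}
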
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 09e22f19f87a..517851f9eeb1 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -100,6 +100,7 @@ namespace {
void VisitUsingDecl(UsingDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+ void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
@@ -422,7 +423,8 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
// FIXME: Need to be able to tell the DeclPrinter when
const char *Terminator = nullptr;
- if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D))
+ if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D) ||
+ isa<OMPRequiresDecl>(*D))
Terminator = nullptr;
else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody())
Terminator = nullptr;
@@ -544,7 +546,7 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
prettyPrintAttributes(D);
if (Expr *Init = D->getInitExpr()) {
Out << " = ";
- Init->printPretty(Out, nullptr, Policy, Indentation, &Context);
+ Init->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
}
@@ -1091,6 +1093,10 @@ void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
printTemplateParameters(FD->getTemplateParameterList(I));
}
VisitRedeclarableTemplateDecl(D);
+ // The declare target attribute is special: the natural spelling of the
+ // pragma assumes an "end" construct, so print that construct here.
+ if (D->getTemplatedDecl()->hasAttr<OMPDeclareTargetDeclAttr>())
+ Out << "#pragma omp end declare target\n";
// Never print "instantiations" for deduction guides (they don't really
// have them).
@@ -1540,6 +1546,15 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
}
}
+void DeclPrinter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
+ Out << "#pragma omp requires ";
+ if (!D->clauselist_empty()) {
+ OMPClausePrinter Printer(Out, Policy);
+ for (auto I = D->clauselist_begin(), E = D->clauselist_end(); I != E; ++I)
+ Printer.Visit(*I);
+ }
+}
+
void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
if (!D->isInvalidDecl()) {
Out << "#pragma omp declare reduction (";
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 848427e7f9d9..76f29dac1647 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -149,6 +149,8 @@ void *allocateDefaultArgStorageChain(const ASTContext &C) {
// RedeclarableTemplateDecl Implementation
//===----------------------------------------------------------------------===//
+void RedeclarableTemplateDecl::anchor() {}
+
RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() const {
if (Common)
return Common;
@@ -300,6 +302,40 @@ ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
}
+void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) {
+ using Base = RedeclarableTemplateDecl;
+
+ // If we haven't created a common pointer yet, then it can just be created
+ // with the usual method.
+ if (!Base::Common)
+ return;
+
+ Common *ThisCommon = static_cast<Common *>(Base::Common);
+ Common *PrevCommon = nullptr;
+ SmallVector<FunctionTemplateDecl *, 8> PreviousDecls;
+ for (; Prev; Prev = Prev->getPreviousDecl()) {
+ if (Prev->Base::Common) {
+ PrevCommon = static_cast<Common *>(Prev->Base::Common);
+ break;
+ }
+ PreviousDecls.push_back(Prev);
+ }
+
+ // If the previous redecl chain hasn't created a common pointer yet, then just
+ // use this common pointer.
+ if (!PrevCommon) {
+ for (auto *D : PreviousDecls)
+ D->Base::Common = ThisCommon;
+ return;
+ }
+
+ // Ensure we don't leak any important state.
+ assert(ThisCommon->Specializations.size() == 0 &&
+ "Can't merge incompatible declarations!");
+
+ Base::Common = PrevCommon;
+}
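
FunctionTemplateDecl::mergePrevDecl above reconciles the lazily created Common state of two redeclaration chains: if only one side has created it, the other side adopts it, and an assertion guards against discarding populated state. Below is a compact sketch of that adoption logic, assuming a hypothetical Node/Shared pair in place of the template declaration and its Common block.

#include <cassert>
#include <memory>
#include <vector>

// Hypothetical stand-ins: Shared plays the role of the lazily created
// "Common" block, Node the role of one declaration in a redecl chain.
struct Shared { std::vector<int> Specializations; };

struct Node {
  Node *Previous = nullptr;       // earlier declaration in the chain, if any
  std::shared_ptr<Shared> Common; // created lazily, shared along the chain

  void mergeWithPrevious(Node *Prev) {
    if (!Common)
      return; // nothing to reconcile yet; lazy creation will pick Prev's up

    // Walk back until we find a declaration that already has shared state.
    std::vector<Node *> WithoutCommon;
    std::shared_ptr<Shared> PrevCommon;
    for (; Prev; Prev = Prev->Previous) {
      if (Prev->Common) { PrevCommon = Prev->Common; break; }
      WithoutCommon.push_back(Prev);
    }

    if (!PrevCommon) {
      // The older chain never created one: hand ours out to it.
      for (Node *N : WithoutCommon)
        N->Common = Common;
      return;
    }

    // Both sides have state; ours must still be empty or we would lose data.
    assert(Common->Specializations.empty() && "can't merge populated state");
    Common = PrevCommon;
  }
};

int main() {
  Node Old, New;
  Old.Common = std::make_shared<Shared>();
  Old.Common->Specializations.push_back(42);
  New.Common = std::make_shared<Shared>(); // empty, so merging is safe
  New.mergeWithPrevious(&Old);
  assert(New.Common == Old.Common && New.Common->Specializations.size() == 1);
  return 0;
}
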
+
//===----------------------------------------------------------------------===//
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
@@ -473,7 +509,7 @@ SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
SourceRange TemplateTypeParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
- return SourceRange(getLocStart(),
+ return SourceRange(getBeginLoc(),
getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
else
return TypeDecl::getSourceRange();
@@ -712,7 +748,7 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
new (Context, DC) ClassTemplateSpecializationDecl(
Context, ClassTemplateSpecialization, TK, DC, StartLoc, IdLoc,
SpecializedTemplate, Args, PrevDecl);
- Result->MayHaveOutOfDateDef = false;
+ Result->setMayHaveOutOfDateDef(false);
Context.getTypeDeclType(Result, PrevDecl);
return Result;
@@ -723,7 +759,7 @@ ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
auto *Result =
new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization);
- Result->MayHaveOutOfDateDef = false;
+ Result->setMayHaveOutOfDateDef(false);
return Result;
}
@@ -830,7 +866,7 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC,
Params, SpecializedTemplate, Args,
ASTArgInfos, PrevDecl);
Result->setSpecializationKind(TSK_ExplicitSpecialization);
- Result->MayHaveOutOfDateDef = false;
+ Result->setMayHaveOutOfDateDef(false);
Context.getInjectedClassNameType(Result, CanonInjectedType);
return Result;
@@ -840,7 +876,7 @@ ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C);
- Result->MayHaveOutOfDateDef = false;
+ Result->setMayHaveOutOfDateDef(false);
return Result;
}
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index 9866d92f61eb..f2c152f918eb 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -39,74 +39,6 @@
using namespace clang;
-namespace clang {
-
-/// CXXSpecialName - Records the type associated with one of the
-/// "special" kinds of declaration names in C++, e.g., constructors,
-/// destructors, and conversion functions.
-class CXXSpecialName
- : public DeclarationNameExtra, public llvm::FoldingSetNode {
-public:
- /// Type - The type associated with this declaration name.
- QualType Type;
-
- /// FETokenInfo - Extra information associated with this declaration
- /// name that can be used by the front end.
- void *FETokenInfo;
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddInteger(ExtraKindOrNumArgs);
- ID.AddPointer(Type.getAsOpaquePtr());
- }
-};
-
-/// Contains extra information for the name of a C++ deduction guide.
-class CXXDeductionGuideNameExtra : public DeclarationNameExtra,
- public llvm::FoldingSetNode {
-public:
- /// The template named by the deduction guide.
- TemplateDecl *Template;
-
- /// FETokenInfo - Extra information associated with this operator
- /// name that can be used by the front end.
- void *FETokenInfo;
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddPointer(Template);
- }
-};
-
-/// CXXOperatorIdName - Contains extra information for the name of an
-/// overloaded operator in C++, such as "operator+.
-class CXXOperatorIdName : public DeclarationNameExtra {
-public:
- /// FETokenInfo - Extra information associated with this operator
- /// name that can be used by the front end.
- void *FETokenInfo;
-};
-
-/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
-/// name.
-///
-/// This identifier is stored here rather than directly in DeclarationName so as
-/// to allow Objective-C selectors, which are about a million times more common,
-/// to consume minimal memory.
-class CXXLiteralOperatorIdName
- : public DeclarationNameExtra, public llvm::FoldingSetNode {
-public:
- IdentifierInfo *ID;
-
- /// FETokenInfo - Extra information associated with this operator
- /// name that can be used by the front end.
- void *FETokenInfo;
-
- void Profile(llvm::FoldingSetNodeID &FSID) {
- FSID.AddPointer(ID);
- }
-};
-
-} // namespace clang
-
static int compareInt(unsigned A, unsigned B) {
return (A < B ? -1 : (A > B ? 1 : 0));
}
@@ -117,10 +49,12 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
switch (LHS.getNameKind()) {
case DeclarationName::Identifier: {
- IdentifierInfo *LII = LHS.getAsIdentifierInfo();
- IdentifierInfo *RII = RHS.getAsIdentifierInfo();
- if (!LII) return RII ? -1 : 0;
- if (!RII) return 1;
+ IdentifierInfo *LII = LHS.castAsIdentifierInfo();
+ IdentifierInfo *RII = RHS.castAsIdentifierInfo();
+ if (!LII)
+ return RII ? -1 : 0;
+ if (!RII)
+ return 1;
return LII->getName().compare(RII->getName());
}
@@ -134,15 +68,18 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
if (LHS.getNameKind() == DeclarationName::ObjCZeroArgSelector &&
RHS.getNameKind() == DeclarationName::ObjCZeroArgSelector) {
return LHSSelector.getAsIdentifierInfo()->getName().compare(
- RHSSelector.getAsIdentifierInfo()->getName());
+ RHSSelector.getAsIdentifierInfo()->getName());
}
unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
switch (LHSSelector.getNameForSlot(I).compare(
- RHSSelector.getNameForSlot(I))) {
- case -1: return -1;
- case 1: return 1;
- default: break;
+ RHSSelector.getNameForSlot(I))) {
+ case -1:
+ return -1;
+ case 1:
+ return 1;
+ default:
+ break;
}
}
@@ -170,7 +107,7 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
case DeclarationName::CXXLiteralOperatorName:
return LHS.getCXXLiteralIdentifier()->getName().compare(
- RHS.getCXXLiteralIdentifier()->getName());
+ RHS.getCXXLiteralIdentifier()->getName());
case DeclarationName::CXXUsingDirective:
return 0;
@@ -199,25 +136,24 @@ static void printCXXConstructorDestructorName(QualType ClassType,
}
void DeclarationName::print(raw_ostream &OS, const PrintingPolicy &Policy) {
- DeclarationName &N = *this;
- switch (N.getNameKind()) {
+ switch (getNameKind()) {
case DeclarationName::Identifier:
- if (const IdentifierInfo *II = N.getAsIdentifierInfo())
+ if (const IdentifierInfo *II = getAsIdentifierInfo())
OS << II->getName();
return;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
- N.getObjCSelector().print(OS);
+ getObjCSelector().print(OS);
return;
case DeclarationName::CXXConstructorName:
- return printCXXConstructorDestructorName(N.getCXXNameType(), OS, Policy);
+ return printCXXConstructorDestructorName(getCXXNameType(), OS, Policy);
case DeclarationName::CXXDestructorName:
OS << '~';
- return printCXXConstructorDestructorName(N.getCXXNameType(), OS, Policy);
+ return printCXXConstructorDestructorName(getCXXNameType(), OS, Policy);
case DeclarationName::CXXDeductionGuideName:
OS << "<deduction guide for ";
@@ -226,13 +162,13 @@ void DeclarationName::print(raw_ostream &OS, const PrintingPolicy &Policy) {
return;
case DeclarationName::CXXOperatorName: {
- static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
- nullptr,
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- Spelling,
+ static const char *const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
+ nullptr,
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ Spelling,
#include "clang/Basic/OperatorKinds.def"
};
- const char *OpName = OperatorNames[N.getCXXOverloadedOperator()];
+ const char *OpName = OperatorNames[getCXXOverloadedOperator()];
assert(OpName && "not an overloaded operator");
OS << "operator";
@@ -243,12 +179,12 @@ void DeclarationName::print(raw_ostream &OS, const PrintingPolicy &Policy) {
}
case DeclarationName::CXXLiteralOperatorName:
- OS << "operator\"\"" << N.getCXXLiteralIdentifier()->getName();
+ OS << "operator\"\"" << getCXXLiteralIdentifier()->getName();
return;
case DeclarationName::CXXConversionFunctionName: {
OS << "operator ";
- QualType Type = N.getCXXNameType();
+ QualType Type = getCXXNameType();
if (const RecordType *Rec = Type->getAs<RecordType>()) {
OS << *Rec->getDecl();
return;
@@ -277,46 +213,6 @@ raw_ostream &operator<<(raw_ostream &OS, DeclarationName N) {
} // namespace clang
-DeclarationName::NameKind DeclarationName::getNameKind() const {
- switch (getStoredNameKind()) {
- case StoredIdentifier: return Identifier;
- case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
- case StoredObjCOneArgSelector: return ObjCOneArgSelector;
-
- case StoredDeclarationNameExtra:
- switch (getExtra()->ExtraKindOrNumArgs) {
- case DeclarationNameExtra::CXXConstructor:
- return CXXConstructorName;
-
- case DeclarationNameExtra::CXXDestructor:
- return CXXDestructorName;
-
- case DeclarationNameExtra::CXXDeductionGuide:
- return CXXDeductionGuideName;
-
- case DeclarationNameExtra::CXXConversionFunction:
- return CXXConversionFunctionName;
-
- case DeclarationNameExtra::CXXLiteralOperator:
- return CXXLiteralOperatorName;
-
- case DeclarationNameExtra::CXXUsingDirective:
- return CXXUsingDirective;
-
- default:
- // Check if we have one of the CXXOperator* enumeration values.
- if (getExtra()->ExtraKindOrNumArgs <
- DeclarationNameExtra::CXXUsingDirective)
- return CXXOperatorName;
-
- return ObjCMultiArgSelector;
- }
- }
-
- // Can't actually get here.
- llvm_unreachable("This should be unreachable!");
-}
-
bool DeclarationName::isDependentName() const {
QualType T = getCXXNameType();
if (!T.isNull() && T->isDependentType())
@@ -337,241 +233,148 @@ std::string DeclarationName::getAsString() const {
return OS.str();
}
-QualType DeclarationName::getCXXNameType() const {
- if (CXXSpecialName *CXXName = getAsCXXSpecialName())
- return CXXName->Type;
- else
- return QualType();
-}
-
-TemplateDecl *DeclarationName::getCXXDeductionGuideTemplate() const {
- if (auto *Guide = getAsCXXDeductionGuideNameExtra())
- return Guide->Template;
- return nullptr;
-}
-
-OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
- if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
- unsigned value
- = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
- return static_cast<OverloadedOperatorKind>(value);
- } else {
- return OO_None;
- }
-}
-
-IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
- if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName())
- return CXXLit->ID;
- else
- return nullptr;
-}
-
-void *DeclarationName::getFETokenInfoAsVoidSlow() const {
+void *DeclarationName::getFETokenInfoSlow() const {
switch (getNameKind()) {
case Identifier:
- llvm_unreachable("Handled by getFETokenInfo()");
-
+ llvm_unreachable("case Identifier already handled by getFETokenInfo!");
case CXXConstructorName:
case CXXDestructorName:
case CXXConversionFunctionName:
- return getAsCXXSpecialName()->FETokenInfo;
-
- case CXXDeductionGuideName:
- return getAsCXXDeductionGuideNameExtra()->FETokenInfo;
-
+ return castAsCXXSpecialNameExtra()->FETokenInfo;
case CXXOperatorName:
- return getAsCXXOperatorIdName()->FETokenInfo;
-
+ return castAsCXXOperatorIdName()->FETokenInfo;
+ case CXXDeductionGuideName:
+ return castAsCXXDeductionGuideNameExtra()->FETokenInfo;
case CXXLiteralOperatorName:
- return getAsCXXLiteralOperatorIdName()->FETokenInfo;
-
+ return castAsCXXLiteralOperatorIdName()->FETokenInfo;
default:
- llvm_unreachable("Declaration name has no FETokenInfo");
+ llvm_unreachable("DeclarationName has no FETokenInfo!");
}
}
-void DeclarationName::setFETokenInfo(void *T) {
+void DeclarationName::setFETokenInfoSlow(void *T) {
switch (getNameKind()) {
case Identifier:
- getAsIdentifierInfo()->setFETokenInfo(T);
- break;
-
+ llvm_unreachable("case Identifier already handled by setFETokenInfo!");
case CXXConstructorName:
case CXXDestructorName:
case CXXConversionFunctionName:
- getAsCXXSpecialName()->FETokenInfo = T;
+ castAsCXXSpecialNameExtra()->FETokenInfo = T;
break;
-
- case CXXDeductionGuideName:
- getAsCXXDeductionGuideNameExtra()->FETokenInfo = T;
- break;
-
case CXXOperatorName:
- getAsCXXOperatorIdName()->FETokenInfo = T;
+ castAsCXXOperatorIdName()->FETokenInfo = T;
+ break;
+ case CXXDeductionGuideName:
+ castAsCXXDeductionGuideNameExtra()->FETokenInfo = T;
break;
-
case CXXLiteralOperatorName:
- getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
+ castAsCXXLiteralOperatorIdName()->FETokenInfo = T;
break;
-
default:
- llvm_unreachable("Declaration name has no FETokenInfo");
+ llvm_unreachable("DeclarationName has no FETokenInfo!");
}
}
-DeclarationName DeclarationName::getUsingDirectiveName() {
- // Single instance of DeclarationNameExtra for using-directive
- static const DeclarationNameExtra UDirExtra =
- { DeclarationNameExtra::CXXUsingDirective };
-
- uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
- Ptr |= StoredDeclarationNameExtra;
-
- return DeclarationName(Ptr);
-}
-
LLVM_DUMP_METHOD void DeclarationName::dump() const {
llvm::errs() << *this << '\n';
}
DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
- CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
- CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
- CXXDeductionGuideNames = new llvm::FoldingSet<CXXDeductionGuideNameExtra>;
-
// Initialize the overloaded operator names.
- CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
- for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
- CXXOperatorNames[Op].ExtraKindOrNumArgs
- = Op + DeclarationNameExtra::CXXConversionFunction;
- CXXOperatorNames[Op].FETokenInfo = nullptr;
- }
-}
-
-DeclarationNameTable::~DeclarationNameTable() {
- auto *SpecialNames =
- static_cast<llvm::FoldingSet<CXXSpecialName> *>(CXXSpecialNamesImpl);
- auto *LiteralNames =
- static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName> *>(
- CXXLiteralOperatorNames);
- auto *DeductionGuideNames =
- static_cast<llvm::FoldingSet<CXXDeductionGuideNameExtra> *>(
- CXXDeductionGuideNames);
-
- delete SpecialNames;
- delete LiteralNames;
- delete DeductionGuideNames;
-}
-
-DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) {
- return getCXXSpecialName(DeclarationName::CXXConstructorName,
- Ty.getUnqualifiedType());
-}
-
-DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) {
- return getCXXSpecialName(DeclarationName::CXXDestructorName,
- Ty.getUnqualifiedType());
+ for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op)
+ CXXOperatorNames[Op].Kind = static_cast<OverloadedOperatorKind>(Op);
}
DeclarationName
DeclarationNameTable::getCXXDeductionGuideName(TemplateDecl *Template) {
Template = cast<TemplateDecl>(Template->getCanonicalDecl());
- auto *DeductionGuideNames =
- static_cast<llvm::FoldingSet<CXXDeductionGuideNameExtra> *>(
- CXXDeductionGuideNames);
-
llvm::FoldingSetNodeID ID;
ID.AddPointer(Template);
void *InsertPos = nullptr;
- if (auto *Name = DeductionGuideNames->FindNodeOrInsertPos(ID, InsertPos))
+ if (auto *Name = CXXDeductionGuideNames.FindNodeOrInsertPos(ID, InsertPos))
return DeclarationName(Name);
- auto *Name = new (Ctx) CXXDeductionGuideNameExtra;
- Name->ExtraKindOrNumArgs = DeclarationNameExtra::CXXDeductionGuide;
- Name->Template = Template;
- Name->FETokenInfo = nullptr;
-
- DeductionGuideNames->InsertNode(Name, InsertPos);
+ auto *Name = new (Ctx) detail::CXXDeductionGuideNameExtra(Template);
+ CXXDeductionGuideNames.InsertNode(Name, InsertPos);
return DeclarationName(Name);
}
+DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) {
+ // The type of constructors is unqualified.
+ Ty = Ty.getUnqualifiedType();
+ // Do we already have this C++ constructor name?
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Ty.getAsOpaquePtr());
+ void *InsertPos = nullptr;
+ if (auto *Name = CXXConstructorNames.FindNodeOrInsertPos(ID, InsertPos))
+ return {Name, DeclarationName::StoredCXXConstructorName};
+
+ // We have to create it.
+ auto *SpecialName = new (Ctx) detail::CXXSpecialNameExtra(Ty);
+ CXXConstructorNames.InsertNode(SpecialName, InsertPos);
+ return {SpecialName, DeclarationName::StoredCXXConstructorName};
+}
+
+DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) {
+ // The type of destructors is unqualified.
+ Ty = Ty.getUnqualifiedType();
+ // Do we already have this C++ destructor name?
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Ty.getAsOpaquePtr());
+ void *InsertPos = nullptr;
+ if (auto *Name = CXXDestructorNames.FindNodeOrInsertPos(ID, InsertPos))
+ return {Name, DeclarationName::StoredCXXDestructorName};
+
+ // We have to create it.
+ auto *SpecialName = new (Ctx) detail::CXXSpecialNameExtra(Ty);
+ CXXDestructorNames.InsertNode(SpecialName, InsertPos);
+ return {SpecialName, DeclarationName::StoredCXXDestructorName};
+}
+
DeclarationName
DeclarationNameTable::getCXXConversionFunctionName(CanQualType Ty) {
- return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty);
+ // Do we already have this C++ conversion function name?
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Ty.getAsOpaquePtr());
+ void *InsertPos = nullptr;
+ if (auto *Name =
+ CXXConversionFunctionNames.FindNodeOrInsertPos(ID, InsertPos))
+ return {Name, DeclarationName::StoredCXXConversionFunctionName};
+
+ // We have to create it.
+ auto *SpecialName = new (Ctx) detail::CXXSpecialNameExtra(Ty);
+ CXXConversionFunctionNames.InsertNode(SpecialName, InsertPos);
+ return {SpecialName, DeclarationName::StoredCXXConversionFunctionName};
}
DeclarationName
DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
CanQualType Ty) {
- assert(Kind >= DeclarationName::CXXConstructorName &&
- Kind <= DeclarationName::CXXConversionFunctionName &&
- "Kind must be a C++ special name kind");
- llvm::FoldingSet<CXXSpecialName> *SpecialNames
- = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
-
- DeclarationNameExtra::ExtraKind EKind;
switch (Kind) {
case DeclarationName::CXXConstructorName:
- EKind = DeclarationNameExtra::CXXConstructor;
- assert(!Ty.hasQualifiers() &&"Constructor type must be unqualified");
- break;
+ return getCXXConstructorName(Ty);
case DeclarationName::CXXDestructorName:
- EKind = DeclarationNameExtra::CXXDestructor;
- assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
- break;
+ return getCXXDestructorName(Ty);
case DeclarationName::CXXConversionFunctionName:
- EKind = DeclarationNameExtra::CXXConversionFunction;
- break;
+ return getCXXConversionFunctionName(Ty);
default:
- return DeclarationName();
+ llvm_unreachable("Invalid kind in getCXXSpecialName!");
}
-
- // Unique selector, to guarantee there is one per name.
- llvm::FoldingSetNodeID ID;
- ID.AddInteger(EKind);
- ID.AddPointer(Ty.getAsOpaquePtr());
-
- void *InsertPos = nullptr;
- if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
- return DeclarationName(Name);
-
- CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
- SpecialName->ExtraKindOrNumArgs = EKind;
- SpecialName->Type = Ty;
- SpecialName->FETokenInfo = nullptr;
-
- SpecialNames->InsertNode(SpecialName, InsertPos);
- return DeclarationName(SpecialName);
-}
-
-DeclarationName
-DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
- return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
}
DeclarationName
DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
- llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
- = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
- (CXXLiteralOperatorNames);
-
llvm::FoldingSetNodeID ID;
ID.AddPointer(II);
void *InsertPos = nullptr;
- if (CXXLiteralOperatorIdName *Name =
- LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
- return DeclarationName (Name);
-
- CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
- LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
- LiteralName->ID = II;
- LiteralName->FETokenInfo = nullptr;
+ if (auto *Name = CXXLiteralOperatorNames.FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
- LiteralNames->InsertNode(LiteralName, InsertPos);
+ auto *LiteralName = new (Ctx) detail::CXXLiteralOperatorIdName(II);
+ CXXLiteralOperatorNames.InsertNode(LiteralName, InsertPos);
return DeclarationName(LiteralName);
}
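
The rewritten DeclarationNameTable uniques each constructor, destructor, and conversion-function name per type through a FoldingSet: profile the key, call FindNodeOrInsertPos, and allocate only on a miss. The sketch below shows the same hash-consing idea with a std::unordered_map standing in for the FoldingSet; TypeId, SpecialName, and NameTable are illustrative, not Clang's CanQualType, CXXSpecialNameExtra, or DeclarationNameTable.

#include <cassert>
#include <memory>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for CanQualType and CXXSpecialNameExtra.
using TypeId = unsigned;

struct SpecialName {
  TypeId Type;
  void *FETokenInfo = nullptr; // extra per-name data, as in the real node
  explicit SpecialName(TypeId T) : Type(T) {}
};

class NameTable {
  // Owns every uniqued node; the map plays the role of the FoldingSet.
  std::vector<std::unique_ptr<SpecialName>> Storage;
  std::unordered_map<TypeId, SpecialName *> ConstructorNames;

public:
  // Returns the same node for the same type: one name per (kind, type) pair.
  SpecialName *getConstructorName(TypeId Ty) {
    auto It = ConstructorNames.find(Ty);
    if (It != ConstructorNames.end())
      return It->second; // already uniqued, reuse it

    Storage.push_back(std::make_unique<SpecialName>(Ty));
    SpecialName *Fresh = Storage.back().get();
    ConstructorNames.emplace(Ty, Fresh);
    return Fresh;
  }
};

int main() {
  NameTable Table;
  SpecialName *A = Table.getConstructorName(7);
  SpecialName *B = Table.getConstructorName(7);
  SpecialName *C = Table.getConstructorName(9);
  assert(A == B && A != C); // identical types share one name object
  return 0;
}
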
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 7556c76c38bd..7cdd3b2c2a30 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -28,7 +28,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -192,7 +191,7 @@ bool Expr::isKnownToHaveBooleanValue() const {
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getExprLoc().
//
-// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
+// See also Stmt.cpp:{getBeginLoc(),getEndLoc()}.
namespace {
/// This implementation is used when a class provides a custom
/// implementation of getExprLoc.
@@ -209,7 +208,7 @@ namespace {
template <class E>
SourceLocation getExprLocImpl(const Expr *expr,
SourceLocation (Expr::*v)() const) {
- return static_cast<const E*>(expr)->getLocStart();
+ return static_cast<const E *>(expr)->getBeginLoc();
}
}
@@ -342,16 +341,32 @@ void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
ExprBits.ContainsUnexpandedParameterPack = true;
}
+DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
+ bool RefersToEnclosingVariableOrCapture, QualType T,
+ ExprValueKind VK, SourceLocation L,
+ const DeclarationNameLoc &LocInfo)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), DNLoc(LocInfo) {
+ DeclRefExprBits.HasQualifier = false;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo = false;
+ DeclRefExprBits.HasFoundDecl = false;
+ DeclRefExprBits.HadMultipleCandidates = false;
+ DeclRefExprBits.RefersToEnclosingVariableOrCapture =
+ RefersToEnclosingVariableOrCapture;
+ DeclRefExprBits.Loc = L;
+ computeDependence(Ctx);
+}
+
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- ValueDecl *D, bool RefersToEnclosingVariableOrCapture,
- const DeclarationNameInfo &NameInfo,
- NamedDecl *FoundD,
+ SourceLocation TemplateKWLoc, ValueDecl *D,
+ bool RefersToEnclosingVariableOrCapture,
+ const DeclarationNameInfo &NameInfo, NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
- D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), DNLoc(NameInfo.getInfo()) {
+ DeclRefExprBits.Loc = NameInfo.getLoc();
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
if (QualifierLoc) {
new (getTrailingObjects<NestedNameSpecifierLoc>())
@@ -447,31 +462,56 @@ DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
return new (Mem) DeclRefExpr(EmptyShell());
}
-SourceLocation DeclRefExpr::getLocStart() const {
+SourceLocation DeclRefExpr::getBeginLoc() const {
if (hasQualifier())
return getQualifierLoc().getBeginLoc();
- return getNameInfo().getLocStart();
+ return getNameInfo().getBeginLoc();
}
-SourceLocation DeclRefExpr::getLocEnd() const {
+SourceLocation DeclRefExpr::getEndLoc() const {
if (hasExplicitTemplateArgs())
return getRAngleLoc();
- return getNameInfo().getLocEnd();
+ return getNameInfo().getEndLoc();
}
-PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentType IT,
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL)
: Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
FNTy->isDependentType(), FNTy->isDependentType(),
FNTy->isInstantiationDependentType(),
- /*ContainsUnexpandedParameterPack=*/false),
- Loc(L), Type(IT), FnName(SL) {}
+ /*ContainsUnexpandedParameterPack=*/false) {
+ PredefinedExprBits.Kind = IK;
+ assert((getIdentKind() == IK) &&
+ "IdentKind does not fit in PredefinedExprBitfields!");
+ bool HasFunctionName = SL != nullptr;
+ PredefinedExprBits.HasFunctionName = HasFunctionName;
+ PredefinedExprBits.Loc = L;
+ if (HasFunctionName)
+ setFunctionName(SL);
+}
+
+PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
+ : Expr(PredefinedExprClass, Empty) {
+ PredefinedExprBits.HasFunctionName = HasFunctionName;
+}
-StringLiteral *PredefinedExpr::getFunctionName() {
- return cast_or_null<StringLiteral>(FnName);
+PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType FNTy, IdentKind IK,
+ StringLiteral *SL) {
+ bool HasFunctionName = SL != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
+ alignof(PredefinedExpr));
+ return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
}
-StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
- switch (IT) {
+PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
+ bool HasFunctionName) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
+ alignof(PredefinedExpr));
+ return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
+}
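
PredefinedExpr now stores its StringLiteral pointer as an optional trailing object: the HasFunctionName bit records whether the slot exists, and totalSizeToAlloc<Stmt *>(HasFunctionName) sizes the allocation to zero or one pointers. Here is a standalone sketch of a conditionally allocated trailing slot; the OptionalTail type and its string payload are made up for illustration, not the Clang node.

#include <cassert>
#include <cstddef>
#include <new>
#include <string>

// Made-up example; the payload pointer stands in for the StringLiteral *.
// alignas keeps the trailing pointer slot suitably aligned after the object.
class alignas(alignof(const std::string *)) OptionalTail {
  bool HasPayload; // mirrors the HasFunctionName bit

  explicit OptionalTail(const std::string *Payload)
      : HasPayload(Payload != nullptr) {
    if (HasPayload)
      *getSlot() = Payload;
  }

  const std::string **getSlot() {
    assert(HasPayload && "no trailing slot was allocated");
    return reinterpret_cast<const std::string **>(this + 1);
  }

public:
  static OptionalTail *Create(const std::string *Payload) {
    // Pay for the trailing pointer only when there is something to store.
    std::size_t Size =
        sizeof(OptionalTail) + (Payload ? sizeof(const std::string *) : 0);
    return new (::operator new(Size)) OptionalTail(Payload);
  }

  const std::string *payload() { return HasPayload ? *getSlot() : nullptr; }
};

int main() {
  std::string Name = "__func__";
  OptionalTail *With = OptionalTail::Create(&Name);
  OptionalTail *Without = OptionalTail::Create(nullptr);
  assert(With->payload() == &Name && Without->payload() == nullptr);
  ::operator delete(With);
  ::operator delete(Without);
  return 0;
}
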
+
+StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
+ switch (IK) {
case Func:
return "__func__";
case Function:
@@ -489,15 +529,15 @@ StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
case PrettyFunctionNoVirtual:
break;
}
- llvm_unreachable("Unknown ident type for PredefinedExpr");
+ llvm_unreachable("Unknown ident kind for PredefinedExpr");
}
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
-std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
+std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
ASTContext &Context = CurrentDecl->getASTContext();
- if (IT == PredefinedExpr::FuncDName) {
+ if (IK == PredefinedExpr::FuncDName) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
std::unique_ptr<MangleContext> MC;
MC.reset(Context.createMangleContext());
@@ -532,21 +572,21 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
llvm::raw_svector_ostream Out(Buffer);
if (auto *DCBlock = dyn_cast<BlockDecl>(DC))
// For nested blocks, propagate up to the parent.
- Out << ComputeName(IT, DCBlock);
+ Out << ComputeName(IK, DCBlock);
else if (auto *DCDecl = dyn_cast<Decl>(DC))
- Out << ComputeName(IT, DCDecl) << "_block_invoke";
+ Out << ComputeName(IK, DCDecl) << "_block_invoke";
return Out.str();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
- if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual &&
- IT != FuncSig && IT != LFuncSig)
+ if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual &&
+ IK != FuncSig && IK != LFuncSig)
return FD->getNameAsString();
SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
+ if (MD->isVirtual() && IK != PrettyFunctionNoVirtual)
Out << "virtual ";
if (MD->isStatic())
Out << "static ";
@@ -564,7 +604,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
- if (IT == FuncSig || IT == LFuncSig) {
+ if (IK == FuncSig || IK == LFuncSig) {
switch (AFT->getCallConv()) {
case CC_C: POut << "__cdecl "; break;
case CC_X86StdCall: POut << "__stdcall "; break;
@@ -589,7 +629,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
if (FT->isVariadic()) {
if (FD->getNumParams()) POut << ", ";
POut << "...";
- } else if ((IT == FuncSig || IT == LFuncSig ||
+ } else if ((IK == FuncSig || IK == LFuncSig ||
!Context.getLangOpts().CPlusPlus) &&
!Decl->getNumParams()) {
POut << "void";
@@ -688,7 +728,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
// CapturedDecl.
if (DC->isFunctionOrMethod() && (DC->getDeclKind() != Decl::Captured)) {
const Decl *D = Decl::castFromDeclContext(DC);
- return ComputeName(IT, D);
+ return ComputeName(IK, D);
}
llvm_unreachable("CapturedDecl not inside a function or method");
}
@@ -713,7 +753,7 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
return Name.str().str();
}
- if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
+ if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) {
// __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
return "top level";
}
@@ -785,7 +825,7 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
// which is 43 characters.
SmallString<64> S;
FixedPointValueToString(
- S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale, Radix);
+ S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale);
return S.str();
}
@@ -861,66 +901,105 @@ double FloatingLiteral::getValueAsApproximateDouble() const {
return V.convertToDouble();
}
-int StringLiteral::mapCharByteWidth(TargetInfo const &target,StringKind k) {
- int CharByteWidth = 0;
- switch(k) {
- case Ascii:
- case UTF8:
- CharByteWidth = target.getCharWidth();
- break;
- case Wide:
- CharByteWidth = target.getWCharWidth();
- break;
- case UTF16:
- CharByteWidth = target.getChar16Width();
- break;
- case UTF32:
- CharByteWidth = target.getChar32Width();
- break;
+unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
+ StringKind SK) {
+ unsigned CharByteWidth = 0;
+ switch (SK) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = Target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = Target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = Target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = Target.getChar32Width();
+ break;
}
assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
CharByteWidth /= 8;
- assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
- && "character byte widths supported are 1, 2, and 4 only");
+ assert((CharByteWidth == 1 || CharByteWidth == 2 || CharByteWidth == 4) &&
+ "The only supported character byte widths are 1, 2, and 4!");
return CharByteWidth;
}
-StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
- const SourceLocation *Loc,
- unsigned NumStrs) {
- assert(C.getAsConstantArrayType(Ty) &&
+StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumConcatenated)
+ : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
+ false) {
+ assert(Ctx.getAsConstantArrayType(Ty) &&
"StringLiteral must be of constant array type!");
+ unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
+ unsigned ByteLength = Str.size();
+ assert((ByteLength % CharByteWidth == 0) &&
+ "The size of the data must be a multiple of CharByteWidth!");
+
+ // Avoid the expensive division. The compiler should be able to figure it
+ // out by itself. However, as of Clang 7 it is not able to do so, even with
+ // an appropriate llvm_unreachable added just here.
+ unsigned Length;
+ switch (CharByteWidth) {
+ case 1:
+ Length = ByteLength;
+ break;
+ case 2:
+ Length = ByteLength / 2;
+ break;
+ case 4:
+ Length = ByteLength / 4;
+ break;
+ default:
+ llvm_unreachable("Unsupported character width!");
+ }
- // Allocate enough space for the StringLiteral plus an array of locations for
- // any concatenated string tokens.
- void *Mem =
- C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
- alignof(StringLiteral));
- StringLiteral *SL = new (Mem) StringLiteral(Ty);
+ StringLiteralBits.Kind = Kind;
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.IsPascal = Pascal;
+ StringLiteralBits.NumConcatenated = NumConcatenated;
+ *getTrailingObjects<unsigned>() = Length;
- // OPTIMIZE: could allocate this appended to the StringLiteral.
- SL->setString(C,Str,Kind,Pascal);
+ // Initialize the trailing array of SourceLocation.
+ // This is safe since SourceLocation is POD-like.
+ std::memcpy(getTrailingObjects<SourceLocation>(), Loc,
+ NumConcatenated * sizeof(SourceLocation));
- SL->TokLocs[0] = Loc[0];
- SL->NumConcatenated = NumStrs;
+ // Initialize the trailing array of char holding the string data.
+ std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+}
- if (NumStrs != 1)
- memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
- return SL;
+StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated,
+ unsigned Length, unsigned CharByteWidth)
+ : Expr(StringLiteralClass, Empty) {
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.NumConcatenated = NumConcatenated;
+ *getTrailingObjects<unsigned>() = Length;
}
-StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
- unsigned NumStrs) {
- void *Mem =
- C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
- alignof(StringLiteral));
- StringLiteral *SL =
- new (Mem) StringLiteral(C.adjustStringLiteralBaseType(QualType()));
- SL->CharByteWidth = 0;
- SL->Length = 0;
- SL->NumConcatenated = NumStrs;
- return SL;
+StringLiteral *StringLiteral::Create(const ASTContext &Ctx, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumConcatenated) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>(
+ 1, NumConcatenated, Str.size()),
+ alignof(StringLiteral));
+ return new (Mem)
+ StringLiteral(Ctx, Str, Kind, Pascal, Ty, Loc, NumConcatenated);
+}
+
+StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumConcatenated,
+ unsigned Length,
+ unsigned CharByteWidth) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>(
+ 1, NumConcatenated, Length * CharByteWidth),
+ alignof(StringLiteral));
+ return new (Mem)
+ StringLiteral(EmptyShell(), NumConcatenated, Length, CharByteWidth);
}
void StringLiteral::outputString(raw_ostream &OS) const {
@@ -1019,42 +1098,6 @@ void StringLiteral::outputString(raw_ostream &OS) const {
OS << '"';
}
-void StringLiteral::setString(const ASTContext &C, StringRef Str,
- StringKind Kind, bool IsPascal) {
- //FIXME: we assume that the string data comes from a target that uses the same
- // code unit size and endianness for the type of string.
- this->Kind = Kind;
- this->IsPascal = IsPascal;
-
- CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
- assert((Str.size()%CharByteWidth == 0)
- && "size of data must be multiple of CharByteWidth");
- Length = Str.size()/CharByteWidth;
-
- switch(CharByteWidth) {
- case 1: {
- char *AStrData = new (C) char[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asChar = AStrData;
- break;
- }
- case 2: {
- uint16_t *AStrData = new (C) uint16_t[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asUInt16 = AStrData;
- break;
- }
- case 4: {
- uint32_t *AStrData = new (C) uint32_t[Length];
- std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
- StrData.asUInt32 = AStrData;
- break;
- }
- default:
- llvm_unreachable("unsupported CharByteWidth");
- }
-}
-
/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
@@ -1076,7 +1119,8 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const LangOptions &Features,
const TargetInfo &Target, unsigned *StartToken,
unsigned *StartTokenByteOffset) const {
- assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
+ assert((getKind() == StringLiteral::Ascii ||
+ getKind() == StringLiteral::UTF8) &&
"Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
@@ -1144,8 +1188,6 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
}
}
-
-
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
@@ -1192,49 +1234,98 @@ OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
// Postfix Operators.
//===----------------------------------------------------------------------===//
-CallExpr::CallExpr(const ASTContext &C, StmtClass SC, Expr *fn,
- ArrayRef<Expr *> preargs, ArrayRef<Expr *> args, QualType t,
- ExprValueKind VK, SourceLocation rparenloc)
- : Expr(SC, t, VK, OK_Ordinary, fn->isTypeDependent(),
- fn->isValueDependent(), fn->isInstantiationDependent(),
- fn->containsUnexpandedParameterPack()),
- NumArgs(args.size()) {
+CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
+ ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
+ SourceLocation RParenLoc, unsigned MinNumArgs,
+ ADLCallKind UsesADL)
+ : Expr(SC, Ty, VK, OK_Ordinary, Fn->isTypeDependent(),
+ Fn->isValueDependent(), Fn->isInstantiationDependent(),
+ Fn->containsUnexpandedParameterPack()),
+ RParenLoc(RParenLoc) {
+ NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
+ unsigned NumPreArgs = PreArgs.size();
+ CallExprBits.NumPreArgs = NumPreArgs;
+ assert((NumPreArgs == getNumPreArgs()) && "NumPreArgs overflow!");
+
+ unsigned OffsetToTrailingObjects = offsetToTrailingObjects(SC);
+ CallExprBits.OffsetToTrailingObjects = OffsetToTrailingObjects;
+ assert((CallExprBits.OffsetToTrailingObjects == OffsetToTrailingObjects) &&
+ "OffsetToTrailingObjects overflow!");
- unsigned NumPreArgs = preargs.size();
- SubExprs = new (C) Stmt *[args.size()+PREARGS_START+NumPreArgs];
- SubExprs[FN] = fn;
- for (unsigned i = 0; i != NumPreArgs; ++i) {
- updateDependenciesFromArg(preargs[i]);
- SubExprs[i+PREARGS_START] = preargs[i];
+ CallExprBits.UsesADL = static_cast<bool>(UsesADL);
+
+ setCallee(Fn);
+ for (unsigned I = 0; I != NumPreArgs; ++I) {
+ updateDependenciesFromArg(PreArgs[I]);
+ setPreArg(I, PreArgs[I]);
+ }
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ updateDependenciesFromArg(Args[I]);
+ setArg(I, Args[I]);
}
- for (unsigned i = 0; i != args.size(); ++i) {
- updateDependenciesFromArg(args[i]);
- SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
+ for (unsigned I = Args.size(); I != NumArgs; ++I) {
+ setArg(I, nullptr);
}
+}
+CallExpr::CallExpr(StmtClass SC, unsigned NumPreArgs, unsigned NumArgs,
+ EmptyShell Empty)
+ : Expr(SC, Empty), NumArgs(NumArgs) {
CallExprBits.NumPreArgs = NumPreArgs;
- RParenLoc = rparenloc;
+ assert((NumPreArgs == getNumPreArgs()) && "NumPreArgs overflow!");
+
+ unsigned OffsetToTrailingObjects = offsetToTrailingObjects(SC);
+ CallExprBits.OffsetToTrailingObjects = OffsetToTrailingObjects;
+ assert((CallExprBits.OffsetToTrailingObjects == OffsetToTrailingObjects) &&
+ "OffsetToTrailingObjects overflow!");
}
-CallExpr::CallExpr(const ASTContext &C, StmtClass SC, Expr *fn,
- ArrayRef<Expr *> args, QualType t, ExprValueKind VK,
- SourceLocation rparenloc)
- : CallExpr(C, SC, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc) {}
+CallExpr *CallExpr::Create(const ASTContext &Ctx, Expr *Fn,
+ ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
+ SourceLocation RParenLoc, unsigned MinNumArgs,
+ ADLCallKind UsesADL) {
+ unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem =
+ Ctx.Allocate(sizeof(CallExpr) + SizeOfTrailingObjects, alignof(CallExpr));
+ return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
+ RParenLoc, MinNumArgs, UsesADL);
+}
-CallExpr::CallExpr(const ASTContext &C, Expr *fn, ArrayRef<Expr *> args,
- QualType t, ExprValueKind VK, SourceLocation rparenloc)
- : CallExpr(C, CallExprClass, fn, ArrayRef<Expr *>(), args, t, VK, rparenloc) {
+CallExpr *CallExpr::CreateTemporary(void *Mem, Expr *Fn, QualType Ty,
+ ExprValueKind VK, SourceLocation RParenLoc,
+ ADLCallKind UsesADL) {
+ assert(!(reinterpret_cast<uintptr_t>(Mem) % alignof(CallExpr)) &&
+ "Misaligned memory in CallExpr::CreateTemporary!");
+ return new (Mem) CallExpr(CallExprClass, Fn, /*PreArgs=*/{}, /*Args=*/{}, Ty,
+ VK, RParenLoc, /*MinNumArgs=*/0, UsesADL);
}
-CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty)
- : CallExpr(C, SC, /*NumPreArgs=*/0, Empty) {}
+CallExpr *CallExpr::CreateEmpty(const ASTContext &Ctx, unsigned NumArgs,
+ EmptyShell Empty) {
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem =
+ Ctx.Allocate(sizeof(CallExpr) + SizeOfTrailingObjects, alignof(CallExpr));
+ return new (Mem) CallExpr(CallExprClass, /*NumPreArgs=*/0, NumArgs, Empty);
+}
-CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs,
- EmptyShell Empty)
- : Expr(SC, Empty), SubExprs(nullptr), NumArgs(0) {
- // FIXME: Why do we allocate this?
- SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs]();
- CallExprBits.NumPreArgs = NumPreArgs;
+unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
+ switch (SC) {
+ case CallExprClass:
+ return sizeof(CallExpr);
+ case CXXOperatorCallExprClass:
+ return sizeof(CXXOperatorCallExpr);
+ case CXXMemberCallExprClass:
+ return sizeof(CXXMemberCallExpr);
+ case UserDefinedLiteralClass:
+ return sizeof(UserDefinedLiteral);
+ case CUDAKernelCallExprClass:
+ return sizeof(CUDAKernelCallExpr);
+ default:
+ llvm_unreachable("unexpected class deriving from CallExpr!");
+ }
}
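
CallExpr now keeps the callee and arguments directly after the most-derived object instead of in a separately allocated Stmt** array; because subclasses such as CXXMemberCallExpr have different sizes, each constructor records offsetToTrailingObjects(SC) so the base class can still locate the array. Below is a standalone sketch of that per-class-offset trick, using the made-up Base/DerivedCall types rather than the Clang hierarchy.

#include <cassert>
#include <cstddef>
#include <new>

// Made-up hierarchy: arguments live after the *most derived* object, and the
// base records how far away that is so it can reach them from base code.
class Base {
protected:
  unsigned OffsetToArgs; // set by whoever knows the derived object's size
  unsigned NumArgs;

  Base(unsigned Offset, unsigned N) : OffsetToArgs(Offset), NumArgs(N) {}

public:
  int *args() {
    // Jump over the derived part to the trailing array.
    return reinterpret_cast<int *>(reinterpret_cast<char *>(this) +
                                   OffsetToArgs);
  }
  unsigned numArgs() const { return NumArgs; }
};

class DerivedCall : public Base {
  double ExtraPayload; // makes the derived object bigger than the base

  explicit DerivedCall(unsigned N)
      : Base(sizeof(DerivedCall), N), ExtraPayload(0) {}

public:
  static DerivedCall *Create(unsigned N) {
    void *Mem = ::operator new(sizeof(DerivedCall) + N * sizeof(int));
    return new (Mem) DerivedCall(N);
  }
};

int main() {
  DerivedCall *Call = DerivedCall::Create(3);
  for (unsigned I = 0; I != Call->numArgs(); ++I)
    Call->args()[I] = static_cast<int>(I * 10);
  Base *AsBase = Call; // the base alone can find the arguments
  assert(AsBase->args()[2] == 20);
  ::operator delete(Call);
  return 0;
}
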
void CallExpr::updateDependenciesFromArg(Expr *Arg) {
@@ -1248,14 +1339,6 @@ void CallExpr::updateDependenciesFromArg(Expr *Arg) {
ExprBits.ContainsUnexpandedParameterPack = true;
}
-FunctionDecl *CallExpr::getDirectCallee() {
- return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
-}
-
-Decl *CallExpr::getCalleeDecl() {
- return getCallee()->getReferencedDeclOfCallee();
-}
-
Decl *Expr::getReferencedDeclOfCallee() {
Expr *CEE = IgnoreParenImpCasts();
@@ -1280,35 +1363,6 @@ Decl *Expr::getReferencedDeclOfCallee() {
return nullptr;
}
-/// setNumArgs - This changes the number of arguments present in this call.
-/// Any orphaned expressions are deleted by this, and any new operands are set
-/// to null.
-void CallExpr::setNumArgs(const ASTContext& C, unsigned NumArgs) {
- // No change, just return.
- if (NumArgs == getNumArgs()) return;
-
- // If shrinking # arguments, just delete the extras and forgot them.
- if (NumArgs < getNumArgs()) {
- this->NumArgs = NumArgs;
- return;
- }
-
- // Otherwise, we are growing the # arguments. New an bigger argument array.
- unsigned NumPreArgs = getNumPreArgs();
- Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
- // Copy over args.
- for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
- NewSubExprs[i] = SubExprs[i];
- // Null out new args.
- for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
- i != NumArgs+PREARGS_START+NumPreArgs; ++i)
- NewSubExprs[i] = nullptr;
-
- if (SubExprs) C.Deallocate(SubExprs);
- SubExprs = NewSubExprs;
- this->NumArgs = NumArgs;
-}
-
/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
@@ -1358,22 +1412,35 @@ QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
return FnType->getReturnType();
}
-SourceLocation CallExpr::getLocStart() const {
+const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
+ // If the return type is a struct, union, or enum that is marked nodiscard,
+ // then return the return type attribute.
+ if (const TagDecl *TD = getCallReturnType(Ctx)->getAsTagDecl())
+ if (const auto *A = TD->getAttr<WarnUnusedResultAttr>())
+ return A;
+
+ // Otherwise, see if the callee is marked nodiscard and return that attribute
+ // instead.
+ const Decl *D = getCalleeDecl();
+ return D ? D->getAttr<WarnUnusedResultAttr>() : nullptr;
+}
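
getUnusedResultAttr above first looks for a WarnUnusedResultAttr on the call's return tag type and only then falls back to one on the callee. A toy sketch of that two-step lookup follows, with plain structs standing in for the attribute machinery; TagDecl and FuncDecl here are simplified stand-ins, not the Clang declarations.

#include <cassert>
#include <string>

// Toy model: "attributes" are just optional strings on declarations.
struct TagDecl { const char *NoDiscardReason = nullptr; };
struct FuncDecl {
  const char *NoDiscardReason = nullptr;
  const TagDecl *ReturnTag = nullptr; // set when the return type is a tag type
};

// Mirrors the lookup order above: the return type's attribute wins,
// otherwise fall back to the attribute on the callee itself.
const char *getUnusedResultReason(const FuncDecl &Callee) {
  if (Callee.ReturnTag && Callee.ReturnTag->NoDiscardReason)
    return Callee.ReturnTag->NoDiscardReason;
  return Callee.NoDiscardReason;
}

int main() {
  TagDecl Status{"status codes must be checked"};
  FuncDecl ReturnsStatus{nullptr, &Status};
  FuncDecl PlainNoDiscard{"result is meaningful", nullptr};
  FuncDecl Ordinary;
  assert(std::string(getUnusedResultReason(ReturnsStatus)) ==
         "status codes must be checked");
  assert(std::string(getUnusedResultReason(PlainNoDiscard)) ==
         "result is meaningful");
  assert(getUnusedResultReason(Ordinary) == nullptr);
  return 0;
}
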
+
+SourceLocation CallExpr::getBeginLoc() const {
if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getLocStart();
+ return cast<CXXOperatorCallExpr>(this)->getBeginLoc();
- SourceLocation begin = getCallee()->getLocStart();
+ SourceLocation begin = getCallee()->getBeginLoc();
if (begin.isInvalid() && getNumArgs() > 0 && getArg(0))
- begin = getArg(0)->getLocStart();
+ begin = getArg(0)->getBeginLoc();
return begin;
}
-SourceLocation CallExpr::getLocEnd() const {
+SourceLocation CallExpr::getEndLoc() const {
if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getLocEnd();
+ return cast<CXXOperatorCallExpr>(this)->getEndLoc();
SourceLocation end = getRParenLoc();
if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
- end = getArg(getNumArgs() - 1)->getLocEnd();
+ end = getArg(getNumArgs() - 1)->getEndLoc();
return end;
}
@@ -1446,7 +1513,7 @@ UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
// Check to see if we are in the situation where alignof(decl) should be
// dependent because decl's alignment is dependent.
- if (ExprKind == UETT_AlignOf) {
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
if (!isValueDependent() || !isInstantiationDependent()) {
E = E->IgnoreParens();
@@ -1502,7 +1569,7 @@ MemberExpr *MemberExpr::Create(
QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
E->setInstantiationDependent(true);
- E->HasQualifierOrFoundDecl = true;
+ E->MemberExprBits.HasQualifierOrFoundDecl = true;
MemberExprNameQualifier *NQ =
E->getTrailingObjects<MemberExprNameQualifier>();
@@ -1510,7 +1577,8 @@ MemberExpr *MemberExpr::Create(
NQ->FoundDecl = founddecl;
}
- E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+ E->MemberExprBits.HasTemplateKWAndArgsInfo =
+ (targs || TemplateKWLoc.isValid());
if (targs) {
bool Dependent = false;
@@ -1529,7 +1597,7 @@ MemberExpr *MemberExpr::Create(
return E;
}
-SourceLocation MemberExpr::getLocStart() const {
+SourceLocation MemberExpr::getBeginLoc() const {
if (isImplicitAccess()) {
if (hasQualifier())
return getQualifierLoc().getBeginLoc();
@@ -1538,17 +1606,17 @@ SourceLocation MemberExpr::getLocStart() const {
// FIXME: We don't want this to happen. Rather, we should be able to
// detect all kinds of implicit accesses more cleanly.
- SourceLocation BaseStartLoc = getBase()->getLocStart();
+ SourceLocation BaseStartLoc = getBase()->getBeginLoc();
if (BaseStartLoc.isValid())
return BaseStartLoc;
return MemberLoc;
}
-SourceLocation MemberExpr::getLocEnd() const {
+SourceLocation MemberExpr::getEndLoc() const {
SourceLocation EndLoc = getMemberNameInfo().getEndLoc();
if (hasExplicitTemplateArgs())
EndLoc = getRAngleLoc();
else if (EndLoc.isInvalid())
- EndLoc = getBase()->getLocEnd();
+ EndLoc = getBase()->getEndLoc();
return EndLoc;
}
@@ -1605,13 +1673,18 @@ bool CastExpr::CastConsistency() const {
assert(getSubExpr()->getType()->isFunctionType());
goto CheckNoBasePath;
- case CK_AddressSpaceConversion:
- assert(getType()->isPointerType() || getType()->isBlockPointerType());
- assert(getSubExpr()->getType()->isPointerType() ||
- getSubExpr()->getType()->isBlockPointerType());
- assert(getType()->getPointeeType().getAddressSpace() !=
- getSubExpr()->getType()->getPointeeType().getAddressSpace());
- LLVM_FALLTHROUGH;
+ case CK_AddressSpaceConversion: {
+ auto Ty = getType();
+ auto SETy = getSubExpr()->getType();
+ assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
+ if (isRValue()) {
+ Ty = Ty->getPointeeType();
+ SETy = SETy->getPointeeType();
+ }
+ assert(!Ty.isNull() && !SETy.isNull() &&
+ Ty.getAddressSpace() != SETy.getAddressSpace());
+ goto CheckNoBasePath;
+ }
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
@@ -1641,9 +1714,9 @@ bool CastExpr::CastConsistency() const {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1661,6 +1734,7 @@ bool CastExpr::CastConsistency() const {
case CK_LValueBitCast: // -> bool&
case CK_UserDefinedConversion: // operator bool()
case CK_BuiltinFnToFnPtr:
+ case CK_FixedPointToBoolean:
CheckNoBasePath:
assert(path_empty() && "Cast kind should not have a base path!");
break;
@@ -1734,21 +1808,6 @@ NamedDecl *CastExpr::getConversionFunction() const {
return nullptr;
}
-CastExpr::BasePathSizeTy *CastExpr::BasePathSize() {
- assert(!path_empty());
- switch (getStmtClass()) {
-#define ABSTRACT_STMT(x)
-#define CASTEXPR(Type, Base) \
- case Stmt::Type##Class: \
- return static_cast<Type *>(this) \
- ->getTrailingObjects<CastExpr::BasePathSizeTy>();
-#define STMT(Type, Base)
-#include "clang/AST/StmtNodes.inc"
- default:
- llvm_unreachable("non-cast expressions not possible here");
- }
-}
-
CXXBaseSpecifier **CastExpr::path_buffer() {
switch (getStmtClass()) {
#define ABSTRACT_STMT(x)
@@ -1787,9 +1846,7 @@ ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
const CXXCastPath *BasePath,
ExprValueKind VK) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
ImplicitCastExpr *E =
new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
if (PathSize)
@@ -1800,9 +1857,7 @@ ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
}
@@ -1813,9 +1868,7 @@ CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
TypeSourceInfo *WrittenTy,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
CStyleCastExpr *E =
new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
if (PathSize)
@@ -1826,9 +1879,7 @@ CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
}
@@ -2039,9 +2090,9 @@ bool InitListExpr::isIdiomaticZeroInitializer(const LangOptions &LangOpts) const
return Lit && Lit->getValue() == 0;
}
-SourceLocation InitListExpr::getLocStart() const {
+SourceLocation InitListExpr::getBeginLoc() const {
if (InitListExpr *SyntacticForm = getSyntacticForm())
- return SyntacticForm->getLocStart();
+ return SyntacticForm->getBeginLoc();
SourceLocation Beg = LBraceLoc;
if (Beg.isInvalid()) {
// Find the first non-null initializer.
@@ -2049,7 +2100,7 @@ SourceLocation InitListExpr::getLocStart() const {
E = InitExprs.end();
I != E; ++I) {
if (Stmt *S = *I) {
- Beg = S->getLocStart();
+ Beg = S->getBeginLoc();
break;
}
}
@@ -2057,9 +2108,9 @@ SourceLocation InitListExpr::getLocStart() const {
return Beg;
}
-SourceLocation InitListExpr::getLocEnd() const {
+SourceLocation InitListExpr::getEndLoc() const {
if (InitListExpr *SyntacticForm = getSyntacticForm())
- return SyntacticForm->getLocEnd();
+ return SyntacticForm->getEndLoc();
SourceLocation End = RBraceLoc;
if (End.isInvalid()) {
// Find the first non-null initializer from the end.
@@ -2067,7 +2118,7 @@ SourceLocation InitListExpr::getLocEnd() const {
E = InitExprs.rend();
I != E; ++I) {
if (Stmt *S = *I) {
- End = S->getLocEnd();
+ End = S->getEndLoc();
break;
}
}
@@ -2262,24 +2313,20 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// If this is a direct call, get the callee.
const CallExpr *CE = cast<CallExpr>(this);
if (const Decl *FD = CE->getCalleeDecl()) {
- const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD);
- bool HasWarnUnusedResultAttr = Func ? Func->hasUnusedResultAttr()
- : FD->hasAttr<WarnUnusedResultAttr>();
-
// If the callee has attribute pure, const, or warn_unused_result, warn
// about it. void foo() { strlen("bar"); } should warn.
//
// Note: If new cases are added here, DiagnoseUnusedExprResult should be
// updated to match for QoI.
- if (HasWarnUnusedResultAttr ||
+ if (CE->hasUnusedResultAttr(Ctx) ||
FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
WarnE = this;
- Loc = CE->getCallee()->getLocStart();
+ Loc = CE->getCallee()->getBeginLoc();
R1 = CE->getCallee()->getSourceRange();
if (unsigned NumArgs = CE->getNumArgs())
- R2 = SourceRange(CE->getArg(0)->getLocStart(),
- CE->getArg(NumArgs-1)->getLocEnd());
+ R2 = SourceRange(CE->getArg(0)->getBeginLoc(),
+ CE->getArg(NumArgs - 1)->getEndLoc());
return true;
}
}
@@ -2296,7 +2343,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
if (const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl()) {
if (Type->hasAttr<WarnUnusedAttr>()) {
WarnE = this;
- Loc = getLocStart();
+ Loc = getBeginLoc();
R1 = getSourceRange();
return true;
}
@@ -2396,7 +2443,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
WarnE = this;
if (const CXXFunctionalCastExpr *CXXCE =
dyn_cast<CXXFunctionalCastExpr>(this)) {
- Loc = CXXCE->getLocStart();
+ Loc = CXXCE->getBeginLoc();
R1 = CXXCE->getSubExpr()->getSourceRange();
} else {
const CStyleCastExpr *CStyleCE = cast<CStyleCastExpr>(this);
@@ -2535,6 +2582,10 @@ Expr* Expr::IgnoreParens() {
continue;
}
}
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E)) {
+ E = CE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2559,6 +2610,10 @@ Expr *Expr::IgnoreParenCasts() {
E = NTTP->getReplacement();
continue;
}
+ if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2580,6 +2635,10 @@ Expr *Expr::IgnoreCasts() {
E = NTTP->getReplacement();
continue;
}
+ if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
+ }
return E;
}
}
@@ -2605,6 +2664,9 @@ Expr *Expr::IgnoreParenLValueCasts() {
= dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
E = NTTP->getReplacement();
continue;
+ } else if (FullExpr *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
+ continue;
}
break;
}
@@ -2870,6 +2932,12 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
break;
}
+ case ConstantExprClass: {
+ // FIXME: We should be able to return "true" here, but it can lead to extra
+ // error messages. E.g. in Sema/array-init.c.
+ const Expr *Exp = cast<ConstantExpr>(this)->getSubExpr();
+ return Exp->isConstantInitializer(Ctx, false, Culprit);
+ }
case CompoundLiteralExprClass: {
// This handles gcc's extension that allows global initializers like
// "struct x {int x;} x = (struct x) {};".
@@ -2909,8 +2977,8 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
const Expr *Elt = ILE->getInit(ElementNo++);
if (Field->isBitField()) {
// Bitfields have to evaluate to an integer.
- llvm::APSInt ResultTmp;
- if (!Elt->EvaluateAsInt(ResultTmp, Ctx)) {
+ EvalResult Result;
+ if (!Elt->EvaluateAsInt(Result, Ctx)) {
if (Culprit)
*Culprit = Elt;
return false;
@@ -3095,6 +3163,11 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
// These never have a side-effect.
return false;
+ case ConstantExprClass:
+ // FIXME: Move this into the "return false;" block above.
+ return cast<ConstantExpr>(this)->getSubExpr()->HasSideEffects(
+ Ctx, IncludePossibleEffects);
+
case CallExprClass:
case CXXOperatorCallExprClass:
case CXXMemberCallExprClass:
@@ -3254,11 +3327,8 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case LambdaExprClass: {
const LambdaExpr *LE = cast<LambdaExpr>(this);
- for (LambdaExpr::capture_iterator I = LE->capture_begin(),
- E = LE->capture_end(); I != E; ++I)
- if (I->getCaptureKind() == LCK_ByCopy)
- // FIXME: Only has a side-effect if the variable is volatile or if
- // the copy would invoke a non-trivial copy constructor.
+ for (Expr *E : LE->capture_inits())
+ if (E->HasSideEffects(Ctx, IncludePossibleEffects))
return true;
return false;
}
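
The LambdaExpr case now asks each capture initializer whether it has side effects instead of conservatively flagging every by-copy capture. A small illustration of inputs the two versions classify differently (example code only, not part of the patch):

    int next_id() { static int n = 0; return ++n; } // has a side effect

    int main() {
      int pure_value = 42;
      auto a = [v = pure_value] { return v; };  // init is a plain read: no effect
      auto b = [id = next_id()] { return id; }; // init calls next_id(): effect
      // The old code reported a side effect for both lambdas just for having a
      // by-copy capture; with this hunk only `b` does, because only its capture
      // initializer has one.
      return a() + b();
    }
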
@@ -3389,20 +3459,20 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
// Check that it is a cast to void*.
if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
QualType Pointee = PT->getPointeeType();
+ Qualifiers Qs = Pointee.getQualifiers();
// Only (void*)0 or equivalent are treated as nullptr. If pointee type
// has non-default address space it is not treated as nullptr.
// (__generic void*)0 in OpenCL 2.0 should not be treated as nullptr
// since it cannot be assigned to a pointer to constant address space.
- bool PointeeHasDefaultAS =
- Pointee.getAddressSpace() == LangAS::Default ||
- (Ctx.getLangOpts().OpenCLVersion >= 200 &&
+ if ((Ctx.getLangOpts().OpenCLVersion >= 200 &&
Pointee.getAddressSpace() == LangAS::opencl_generic) ||
(Ctx.getLangOpts().OpenCL &&
Ctx.getLangOpts().OpenCLVersion < 200 &&
- Pointee.getAddressSpace() == LangAS::opencl_private);
+ Pointee.getAddressSpace() == LangAS::opencl_private))
+ Qs.removeAddressSpace();
- if (PointeeHasDefaultAS && Pointee->isVoidType() && // to void*
- CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ if (Pointee->isVoidType() && Qs.empty() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int
return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
}
}
@@ -3866,11 +3936,11 @@ SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr*>(this);
if (size() == 1)
return DIE->getDesignator(0)->getSourceRange();
- return SourceRange(DIE->getDesignator(0)->getLocStart(),
- DIE->getDesignator(size()-1)->getLocEnd());
+ return SourceRange(DIE->getDesignator(0)->getBeginLoc(),
+ DIE->getDesignator(size() - 1)->getEndLoc());
}
-SourceLocation DesignatedInitExpr::getLocStart() const {
+SourceLocation DesignatedInitExpr::getBeginLoc() const {
SourceLocation StartLoc;
auto *DIE = const_cast<DesignatedInitExpr *>(this);
Designator &First = *DIE->getDesignator(0);
@@ -3885,8 +3955,8 @@ SourceLocation DesignatedInitExpr::getLocStart() const {
return StartLoc;
}
-SourceLocation DesignatedInitExpr::getLocEnd() const {
- return getInit()->getLocEnd();
+SourceLocation DesignatedInitExpr::getEndLoc() const {
+ return getInit()->getEndLoc();
}
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
@@ -3944,35 +4014,56 @@ DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
BaseAndUpdaterExprs[1] = ILE;
}
-SourceLocation DesignatedInitUpdateExpr::getLocStart() const {
- return getBase()->getLocStart();
+SourceLocation DesignatedInitUpdateExpr::getBeginLoc() const {
+ return getBase()->getBeginLoc();
}
-SourceLocation DesignatedInitUpdateExpr::getLocEnd() const {
- return getBase()->getLocEnd();
+SourceLocation DesignatedInitUpdateExpr::getEndLoc() const {
+ return getBase()->getEndLoc();
}
-ParenListExpr::ParenListExpr(const ASTContext& C, SourceLocation lparenloc,
- ArrayRef<Expr*> exprs,
- SourceLocation rparenloc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
- false, false, false, false),
- NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) {
- Exprs = new (C) Stmt*[exprs.size()];
- for (unsigned i = 0; i != exprs.size(); ++i) {
- if (exprs[i]->isTypeDependent())
+ParenListExpr::ParenListExpr(SourceLocation LParenLoc, ArrayRef<Expr *> Exprs,
+ SourceLocation RParenLoc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ ParenListExprBits.NumExprs = Exprs.size();
+
+ for (unsigned I = 0, N = Exprs.size(); I != N; ++I) {
+ if (Exprs[I]->isTypeDependent())
ExprBits.TypeDependent = true;
- if (exprs[i]->isValueDependent())
+ if (Exprs[I]->isValueDependent())
ExprBits.ValueDependent = true;
- if (exprs[i]->isInstantiationDependent())
+ if (Exprs[I]->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
- if (exprs[i]->containsUnexpandedParameterPack())
+ if (Exprs[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- Exprs[i] = exprs[i];
+ getTrailingObjects<Stmt *>()[I] = Exprs[I];
}
}
+ParenListExpr::ParenListExpr(EmptyShell Empty, unsigned NumExprs)
+ : Expr(ParenListExprClass, Empty) {
+ ParenListExprBits.NumExprs = NumExprs;
+}
+
+ParenListExpr *ParenListExpr::Create(const ASTContext &Ctx,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Exprs,
+ SourceLocation RParenLoc) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(Exprs.size()),
+ alignof(ParenListExpr));
+ return new (Mem) ParenListExpr(LParenLoc, Exprs, RParenLoc);
+}
+
+ParenListExpr *ParenListExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumExprs) {
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumExprs), alignof(ParenListExpr));
+ return new (Mem) ParenListExpr(EmptyShell(), NumExprs);
+}
+
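
ParenListExpr now keeps its sub-expressions as trailing objects and is built through the Create/CreateEmpty pair above. A sketch of a call site; Ctx, LParenLoc, RParenLoc, LHS and RHS are assumed to already be in scope, and only the Create signature itself comes from the patch:

    llvm::SmallVector<Expr *, 2> Exprs = {LHS, RHS};
    ParenListExpr *PLE = ParenListExpr::Create(Ctx, LParenLoc, Exprs, RParenLoc);
    assert(PLE->getNumExprs() == 2); // the count is kept in ParenListExprBits
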
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
e = ewc->getSubExpr();
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 93d68ec8e0b2..3891f45c7fc2 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -89,88 +89,132 @@ QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const {
}
// CXXScalarValueInitExpr
-SourceLocation CXXScalarValueInitExpr::getLocStart() const {
- return TypeInfo ? TypeInfo->getTypeLoc().getBeginLoc() : RParenLoc;
+SourceLocation CXXScalarValueInitExpr::getBeginLoc() const {
+ return TypeInfo ? TypeInfo->getTypeLoc().getBeginLoc() : getRParenLoc();
}
// CXXNewExpr
-CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew,
- FunctionDecl *operatorNew, FunctionDecl *operatorDelete,
- bool PassAlignment, bool usualArrayDeleteWantsSize,
- ArrayRef<Expr*> placementArgs,
- SourceRange typeIdParens, Expr *arraySize,
- InitializationStyle initializationStyle,
- Expr *initializer, QualType ty,
- TypeSourceInfo *allocatedTypeInfo,
- SourceRange Range, SourceRange directInitRange)
- : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary, ty->isDependentType(),
- ty->isDependentType(), ty->isInstantiationDependentType(),
- ty->containsUnexpandedParameterPack()),
- OperatorNew(operatorNew), OperatorDelete(operatorDelete),
- AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
- Range(Range), DirectInitRange(directInitRange), GlobalNew(globalNew),
- PassAlignment(PassAlignment),
- UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
- assert((initializer != nullptr || initializationStyle == NoInit) &&
- "Only NoInit can have no initializer.");
- StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
- AllocateArgsArray(C, arraySize != nullptr, placementArgs.size(),
- initializer != nullptr);
- unsigned i = 0;
- if (Array) {
- if (arraySize->isInstantiationDependent())
+CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
+ FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
+ bool UsualArrayDeleteWantsSize,
+ ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
+ Expr *ArraySize, InitializationStyle InitializationStyle,
+ Expr *Initializer, QualType Ty,
+ TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
+ SourceRange DirectInitRange)
+ : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
+ Ty->isDependentType(), Ty->isInstantiationDependentType(),
+ Ty->containsUnexpandedParameterPack()),
+ OperatorNew(OperatorNew), OperatorDelete(OperatorDelete),
+ AllocatedTypeInfo(AllocatedTypeInfo), Range(Range),
+ DirectInitRange(DirectInitRange) {
+
+ assert((Initializer != nullptr || InitializationStyle == NoInit) &&
+ "Only NoInit can have no initializer!");
+
+ CXXNewExprBits.IsGlobalNew = IsGlobalNew;
+ CXXNewExprBits.IsArray = ArraySize != nullptr;
+ CXXNewExprBits.ShouldPassAlignment = ShouldPassAlignment;
+ CXXNewExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize;
+ CXXNewExprBits.StoredInitializationStyle =
+ Initializer ? InitializationStyle + 1 : 0;
+ bool IsParenTypeId = TypeIdParens.isValid();
+ CXXNewExprBits.IsParenTypeId = IsParenTypeId;
+ CXXNewExprBits.NumPlacementArgs = PlacementArgs.size();
+
+ if (ArraySize) {
+ if (ArraySize->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
-
- if (arraySize->containsUnexpandedParameterPack())
+ if (ArraySize->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- SubExprs[i++] = arraySize;
+ getTrailingObjects<Stmt *>()[arraySizeOffset()] = ArraySize;
}
- if (initializer) {
- if (initializer->isInstantiationDependent())
+ if (Initializer) {
+ if (Initializer->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
-
- if (initializer->containsUnexpandedParameterPack())
+ if (Initializer->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- SubExprs[i++] = initializer;
+ getTrailingObjects<Stmt *>()[initExprOffset()] = Initializer;
}
- for (unsigned j = 0; j != placementArgs.size(); ++j) {
- if (placementArgs[j]->isInstantiationDependent())
+ for (unsigned I = 0; I != PlacementArgs.size(); ++I) {
+ if (PlacementArgs[I]->isInstantiationDependent())
ExprBits.InstantiationDependent = true;
- if (placementArgs[j]->containsUnexpandedParameterPack())
+ if (PlacementArgs[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
- SubExprs[i++] = placementArgs[j];
+ getTrailingObjects<Stmt *>()[placementNewArgsOffset() + I] =
+ PlacementArgs[I];
}
+ if (IsParenTypeId)
+ getTrailingObjects<SourceRange>()[0] = TypeIdParens;
+
switch (getInitializationStyle()) {
case CallInit:
- this->Range.setEnd(DirectInitRange.getEnd()); break;
+ this->Range.setEnd(DirectInitRange.getEnd());
+ break;
case ListInit:
- this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break;
+ this->Range.setEnd(getInitializer()->getSourceRange().getEnd());
+ break;
default:
- if (TypeIdParens.isValid())
+ if (IsParenTypeId)
this->Range.setEnd(TypeIdParens.getEnd());
break;
}
}
-void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray,
- unsigned numPlaceArgs, bool hasInitializer){
- assert(SubExprs == nullptr && "SubExprs already allocated");
- Array = isArray;
- NumPlacementArgs = numPlaceArgs;
-
- unsigned TotalSize = Array + hasInitializer + NumPlacementArgs;
- SubExprs = new (C) Stmt*[TotalSize];
+CXXNewExpr::CXXNewExpr(EmptyShell Empty, bool IsArray,
+ unsigned NumPlacementArgs, bool IsParenTypeId)
+ : Expr(CXXNewExprClass, Empty) {
+ CXXNewExprBits.IsArray = IsArray;
+ CXXNewExprBits.NumPlacementArgs = NumPlacementArgs;
+ CXXNewExprBits.IsParenTypeId = IsParenTypeId;
+}
+
+CXXNewExpr *
+CXXNewExpr::Create(const ASTContext &Ctx, bool IsGlobalNew,
+ FunctionDecl *OperatorNew, FunctionDecl *OperatorDelete,
+ bool ShouldPassAlignment, bool UsualArrayDeleteWantsSize,
+ ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
+ Expr *ArraySize, InitializationStyle InitializationStyle,
+ Expr *Initializer, QualType Ty,
+ TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
+ SourceRange DirectInitRange) {
+ bool IsArray = ArraySize != nullptr;
+ bool HasInit = Initializer != nullptr;
+ unsigned NumPlacementArgs = PlacementArgs.size();
+ bool IsParenTypeId = TypeIdParens.isValid();
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *, SourceRange>(
+ IsArray + HasInit + NumPlacementArgs, IsParenTypeId),
+ alignof(CXXNewExpr));
+ return new (Mem)
+ CXXNewExpr(IsGlobalNew, OperatorNew, OperatorDelete, ShouldPassAlignment,
+ UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens,
+ ArraySize, InitializationStyle, Initializer, Ty,
+ AllocatedTypeInfo, Range, DirectInitRange);
+}
+
+CXXNewExpr *CXXNewExpr::CreateEmpty(const ASTContext &Ctx, bool IsArray,
+ bool HasInit, unsigned NumPlacementArgs,
+ bool IsParenTypeId) {
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *, SourceRange>(
+ IsArray + HasInit + NumPlacementArgs, IsParenTypeId),
+ alignof(CXXNewExpr));
+ return new (Mem)
+ CXXNewExpr(EmptyShell(), IsArray, NumPlacementArgs, IsParenTypeId);
}
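
CXXNewExpr now lays out the optional array size, the optional initializer, the placement arguments and the optional parenthesized type-id range as trailing objects, so Create and CreateEmpty size a single allocation from the same four quantities recorded in CXXNewExprBits. A self-contained model of that size computation, with stand-in types and a made-up node size in place of the real totalSizeToAlloc:

    #include <cstddef>
    #include <iostream>

    struct StmtPtr { void *P; };        // stands in for Stmt *
    struct SrcRange { unsigned B, E; }; // stands in for SourceRange

    constexpr std::size_t NodeBytes = 64; // pretend sizeof(CXXNewExpr)

    constexpr std::size_t totalSize(bool IsArray, bool HasInit,
                                    unsigned NumPlacementArgs, bool IsParenTypeId) {
      // One Stmt* slot each for the array size, the initializer and every
      // placement argument, plus one SourceRange only for a parenthesized
      // type-id; arraySizeOffset()/initExprOffset()/placementNewArgsOffset()
      // above index into the Stmt* part of this block.
      return NodeBytes + (IsArray + HasInit + NumPlacementArgs) * sizeof(StmtPtr) +
             (IsParenTypeId ? sizeof(SrcRange) : 0);
    }

    int main() {
      // e.g. `new (buf) T(x)`: one placement argument, an initializer, no array
      // size, no parenthesized type-id.
      std::cout << totalSize(false, true, 1, false) << " bytes\n";
    }
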
-bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const {
- return getOperatorNew()->getType()->castAs<FunctionProtoType>()
- ->isNothrow() &&
+bool CXXNewExpr::shouldNullCheckAllocation() const {
+ return getOperatorNew()
+ ->getType()
+ ->castAs<FunctionProtoType>()
+ ->isNothrow() &&
!getOperatorNew()->isReservedGlobalPlacementOperator();
}
@@ -250,7 +294,7 @@ QualType CXXPseudoDestructorExpr::getDestroyedType() const {
return QualType();
}
-SourceLocation CXXPseudoDestructorExpr::getLocEnd() const {
+SourceLocation CXXPseudoDestructorExpr::getEndLoc() const {
SourceLocation End = DestroyedType.getLocation();
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
@@ -258,68 +302,95 @@ SourceLocation CXXPseudoDestructorExpr::getLocEnd() const {
}
// UnresolvedLookupExpr
-UnresolvedLookupExpr *
-UnresolvedLookupExpr::Create(const ASTContext &C,
- CXXRecordDecl *NamingClass,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- bool ADL,
- const TemplateArgumentListInfo *Args,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
- assert(Args || TemplateKWLoc.isValid());
- unsigned num_args = Args ? Args->size() : 0;
+UnresolvedLookupExpr::UnresolvedLookupExpr(
+ const ASTContext &Context, CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo, bool RequiresADL, bool Overloaded,
+ const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : OverloadExpr(UnresolvedLookupExprClass, Context, QualifierLoc,
+ TemplateKWLoc, NameInfo, TemplateArgs, Begin, End, false,
+ false, false),
+ NamingClass(NamingClass) {
+ UnresolvedLookupExprBits.RequiresADL = RequiresADL;
+ UnresolvedLookupExprBits.Overloaded = Overloaded;
+}
+
+UnresolvedLookupExpr::UnresolvedLookupExpr(EmptyShell Empty,
+ unsigned NumResults,
+ bool HasTemplateKWAndArgsInfo)
+ : OverloadExpr(UnresolvedLookupExprClass, Empty, NumResults,
+ HasTemplateKWAndArgsInfo) {}
+
+UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
+ const ASTContext &Context, CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
+ bool RequiresADL, bool Overloaded, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ unsigned NumResults = End - Begin;
+ unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(NumResults, 0, 0);
+ void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
+ return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
+ SourceLocation(), NameInfo, RequiresADL,
+ Overloaded, nullptr, Begin, End);
+}
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(1,
- num_args);
- void *Mem = C.Allocate(Size, alignof(UnresolvedLookupExpr));
- return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
- TemplateKWLoc, NameInfo,
- ADL, /*Overload*/ true, Args,
- Begin, End);
-}
-
-UnresolvedLookupExpr *
-UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
- bool HasTemplateKWAndArgsInfo,
- unsigned NumTemplateArgs) {
+UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
+ const ASTContext &Context, CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo, bool RequiresADL,
+ const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Args || TemplateKWLoc.isValid());
+ unsigned NumResults = End - Begin;
+ unsigned NumTemplateArgs = Args ? Args->size() : 0;
+ unsigned Size =
+ totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(NumResults, 1, NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
+ return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
+ TemplateKWLoc, NameInfo, RequiresADL,
+ /*Overloaded*/ true, Args, Begin, End);
+}
+
+UnresolvedLookupExpr *UnresolvedLookupExpr::CreateEmpty(
+ const ASTContext &Context, unsigned NumResults,
+ bool HasTemplateKWAndArgsInfo, unsigned NumTemplateArgs) {
assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
- HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, alignof(UnresolvedLookupExpr));
- auto *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
- E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
- return E;
+ unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(
+ NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
+ return new (Mem)
+ UnresolvedLookupExpr(EmptyShell(), NumResults, HasTemplateKWAndArgsInfo);
}
-OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
+OverloadExpr::OverloadExpr(StmtClass SC, const ASTContext &Context,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin,
- UnresolvedSetIterator End,
- bool KnownDependent,
+ UnresolvedSetIterator End, bool KnownDependent,
bool KnownInstantiationDependent,
bool KnownContainsUnexpandedParameterPack)
- : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
- KnownDependent,
- (KnownInstantiationDependent ||
- NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
+ : Expr(
+ SC, Context.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
+ KnownDependent,
+ (KnownInstantiationDependent || NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (KnownContainsUnexpandedParameterPack ||
- NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
- NameInfo(NameInfo), QualifierLoc(QualifierLoc), NumResults(End - Begin),
- HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
- TemplateKWLoc.isValid()) {
- NumResults = End - Begin;
+ (KnownContainsUnexpandedParameterPack ||
+ NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ NameInfo(NameInfo), QualifierLoc(QualifierLoc) {
+ unsigned NumResults = End - Begin;
+ OverloadExprBits.NumResults = NumResults;
+ OverloadExprBits.HasTemplateKWAndArgsInfo =
+ (TemplateArgs != nullptr) || TemplateKWLoc.isValid();
+
if (NumResults) {
// Determine whether this expression is type-dependent.
for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
@@ -331,8 +402,9 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
}
}
- Results = static_cast<DeclAccessPair *>(C.Allocate(
- sizeof(DeclAccessPair) * NumResults, alignof(DeclAccessPair)));
+ // Copy the results to the trailing array past UnresolvedLookupExpr
+ // or UnresolvedMemberExpr.
+ DeclAccessPair *Results = getTrailingResults();
memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
}
@@ -360,48 +432,33 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
}
if (isTypeDependent())
- setType(C.DependentTy);
-}
-
-void OverloadExpr::initializeResults(const ASTContext &C,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
- assert(!Results && "Results already initialized!");
- NumResults = End - Begin;
- if (NumResults) {
- Results = static_cast<DeclAccessPair *>(
- C.Allocate(sizeof(DeclAccessPair) * NumResults,
-
- alignof(DeclAccessPair)));
- memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
- }
+ setType(Context.DependentTy);
}
-CXXRecordDecl *OverloadExpr::getNamingClass() const {
- if (isa<UnresolvedLookupExpr>(this))
- return cast<UnresolvedLookupExpr>(this)->getNamingClass();
- else
- return cast<UnresolvedMemberExpr>(this)->getNamingClass();
+OverloadExpr::OverloadExpr(StmtClass SC, EmptyShell Empty, unsigned NumResults,
+ bool HasTemplateKWAndArgsInfo)
+ : Expr(SC, Empty) {
+ OverloadExprBits.NumResults = NumResults;
+ OverloadExprBits.HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
}
// DependentScopeDeclRefExpr
-DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *Args)
- : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
- true, true,
- (NameInfo.isInstantiationDependent() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
- (NameInfo.containsUnexpandedParameterPack() ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()))),
- QualifierLoc(QualifierLoc), NameInfo(NameInfo),
- HasTemplateKWAndArgsInfo(Args != nullptr || TemplateKWLoc.isValid())
-{
+DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(
+ QualType Ty, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args)
+ : Expr(
+ DependentScopeDeclRefExprClass, Ty, VK_LValue, OK_Ordinary, true,
+ true,
+ (NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo) {
+ DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo =
+ (Args != nullptr) || TemplateKWLoc.isValid();
if (Args) {
bool Dependent = true;
bool InstantiationDependent = true;
@@ -417,57 +474,55 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
}
}
-DependentScopeDeclRefExpr *
-DependentScopeDeclRefExpr::Create(const ASTContext &C,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *Args) {
+DependentScopeDeclRefExpr *DependentScopeDeclRefExpr::Create(
+ const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args) {
assert(QualifierLoc && "should be created for dependent qualifiers");
bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid();
std::size_t Size =
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, Args ? Args->size() : 0);
- void *Mem = C.Allocate(Size);
- return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) DependentScopeDeclRefExpr(Context.DependentTy, QualifierLoc,
TemplateKWLoc, NameInfo, Args);
}
DependentScopeDeclRefExpr *
-DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C,
+DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &Context,
bool HasTemplateKWAndArgsInfo,
unsigned NumTemplateArgs) {
assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
std::size_t Size =
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size);
- auto *E =
- new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
- SourceLocation(),
- DeclarationNameInfo(), nullptr);
- E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ void *Mem = Context.Allocate(Size);
+ auto *E = new (Mem) DependentScopeDeclRefExpr(
+ QualType(), NestedNameSpecifierLoc(), SourceLocation(),
+ DeclarationNameInfo(), nullptr);
+ E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo =
+ HasTemplateKWAndArgsInfo;
return E;
}
-SourceLocation CXXConstructExpr::getLocStart() const {
+SourceLocation CXXConstructExpr::getBeginLoc() const {
if (isa<CXXTemporaryObjectExpr>(this))
- return cast<CXXTemporaryObjectExpr>(this)->getLocStart();
- return Loc;
+ return cast<CXXTemporaryObjectExpr>(this)->getBeginLoc();
+ return getLocation();
}
-SourceLocation CXXConstructExpr::getLocEnd() const {
+SourceLocation CXXConstructExpr::getEndLoc() const {
if (isa<CXXTemporaryObjectExpr>(this))
- return cast<CXXTemporaryObjectExpr>(this)->getLocEnd();
+ return cast<CXXTemporaryObjectExpr>(this)->getEndLoc();
if (ParenOrBraceRange.isValid())
return ParenOrBraceRange.getEnd();
- SourceLocation End = Loc;
+ SourceLocation End = getLocation();
for (unsigned I = getNumArgs(); I > 0; --I) {
const Expr *Arg = getArg(I-1);
if (!Arg->isDefaultArgument()) {
- SourceLocation NewEnd = Arg->getLocEnd();
+ SourceLocation NewEnd = Arg->getEndLoc();
if (NewEnd.isValid()) {
End = NewEnd;
break;
@@ -478,30 +533,110 @@ SourceLocation CXXConstructExpr::getLocEnd() const {
return End;
}
+CXXOperatorCallExpr::CXXOperatorCallExpr(OverloadedOperatorKind OpKind,
+ Expr *Fn, ArrayRef<Expr *> Args,
+ QualType Ty, ExprValueKind VK,
+ SourceLocation OperatorLoc,
+ FPOptions FPFeatures,
+ ADLCallKind UsesADL)
+ : CallExpr(CXXOperatorCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
+ OperatorLoc, /*MinNumArgs=*/0, UsesADL) {
+ CXXOperatorCallExprBits.OperatorKind = OpKind;
+ CXXOperatorCallExprBits.FPFeatures = FPFeatures.getInt();
+ assert(
+ (CXXOperatorCallExprBits.OperatorKind == static_cast<unsigned>(OpKind)) &&
+ "OperatorKind overflow!");
+ assert((CXXOperatorCallExprBits.FPFeatures == FPFeatures.getInt()) &&
+ "FPFeatures overflow!");
+ Range = getSourceRangeImpl();
+}
+
+CXXOperatorCallExpr::CXXOperatorCallExpr(unsigned NumArgs, EmptyShell Empty)
+ : CallExpr(CXXOperatorCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+
+CXXOperatorCallExpr *CXXOperatorCallExpr::Create(
+ const ASTContext &Ctx, OverloadedOperatorKind OpKind, Expr *Fn,
+ ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
+ SourceLocation OperatorLoc, FPOptions FPFeatures, ADLCallKind UsesADL) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned NumArgs = Args.size();
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CXXOperatorCallExpr) + SizeOfTrailingObjects,
+ alignof(CXXOperatorCallExpr));
+ return new (Mem) CXXOperatorCallExpr(OpKind, Fn, Args, Ty, VK, OperatorLoc,
+ FPFeatures, UsesADL);
+}
+
+CXXOperatorCallExpr *CXXOperatorCallExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumArgs,
+ EmptyShell Empty) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CXXOperatorCallExpr) + SizeOfTrailingObjects,
+ alignof(CXXOperatorCallExpr));
+ return new (Mem) CXXOperatorCallExpr(NumArgs, Empty);
+}
+
SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
OverloadedOperatorKind Kind = getOperator();
if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
if (getNumArgs() == 1)
// Prefix operator
- return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
+ return SourceRange(getOperatorLoc(), getArg(0)->getEndLoc());
else
// Postfix operator
- return SourceRange(getArg(0)->getLocStart(), getOperatorLoc());
+ return SourceRange(getArg(0)->getBeginLoc(), getOperatorLoc());
} else if (Kind == OO_Arrow) {
return getArg(0)->getSourceRange();
} else if (Kind == OO_Call) {
- return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
+ return SourceRange(getArg(0)->getBeginLoc(), getRParenLoc());
} else if (Kind == OO_Subscript) {
- return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
+ return SourceRange(getArg(0)->getBeginLoc(), getRParenLoc());
} else if (getNumArgs() == 1) {
- return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
+ return SourceRange(getOperatorLoc(), getArg(0)->getEndLoc());
} else if (getNumArgs() == 2) {
- return SourceRange(getArg(0)->getLocStart(), getArg(1)->getLocEnd());
+ return SourceRange(getArg(0)->getBeginLoc(), getArg(1)->getEndLoc());
} else {
return getOperatorLoc();
}
}
+CXXMemberCallExpr::CXXMemberCallExpr(Expr *Fn, ArrayRef<Expr *> Args,
+ QualType Ty, ExprValueKind VK,
+ SourceLocation RP, unsigned MinNumArgs)
+ : CallExpr(CXXMemberCallExprClass, Fn, /*PreArgs=*/{}, Args, Ty, VK, RP,
+ MinNumArgs, NotADL) {}
+
+CXXMemberCallExpr::CXXMemberCallExpr(unsigned NumArgs, EmptyShell Empty)
+ : CallExpr(CXXMemberCallExprClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+
+CXXMemberCallExpr *CXXMemberCallExpr::Create(const ASTContext &Ctx, Expr *Fn,
+ ArrayRef<Expr *> Args, QualType Ty,
+ ExprValueKind VK,
+ SourceLocation RP,
+ unsigned MinNumArgs) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CXXMemberCallExpr) + SizeOfTrailingObjects,
+ alignof(CXXMemberCallExpr));
+ return new (Mem) CXXMemberCallExpr(Fn, Args, Ty, VK, RP, MinNumArgs);
+}
+
+CXXMemberCallExpr *CXXMemberCallExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumArgs,
+ EmptyShell Empty) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CXXMemberCallExpr) + SizeOfTrailingObjects,
+ alignof(CXXMemberCallExpr));
+ return new (Mem) CXXMemberCallExpr(NumArgs, Empty);
+}
+
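
CXXOperatorCallExpr and CXXMemberCallExpr (and UserDefinedLiteral below) now place their callee and argument Stmt* slots in storage owned by CallExpr but allocated after the most derived object, which is why each Create adds CallExpr::sizeOfTrailingObjects(NumPreArgs, NumArgs) to the sizeof of the concrete class. A rough sketch of that layout with hypothetical types rather than the real ones:

    #include <cstddef>
    #include <new>

    struct Stmt;

    struct Call {                       // stands in for CallExpr
      std::size_t NumArgs;
      static std::size_t sizeOfTrailingObjects(unsigned NumPreArgs,
                                               unsigned NumArgs) {
        return (1 /*callee*/ + NumPreArgs + NumArgs) * sizeof(Stmt *);
      }
    };

    struct MemberCall : Call {          // stands in for CXXMemberCallExpr
      explicit MemberCall(unsigned N) : Call{N} {}
      Stmt **stmts() {                  // slots start after the most derived object
        return reinterpret_cast<Stmt **>(this + 1);
      }
      static MemberCall *create(unsigned NumArgs) {
        std::size_t Bytes = sizeof(MemberCall) +
                            Call::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
        return new (::operator new(Bytes)) MemberCall(NumArgs);
      }
    };

    int main() {
      MemberCall *MC = MemberCall::create(2);
      MC->stmts()[0] = nullptr; // callee slot; the arguments follow it
      ::operator delete(MC);
    }
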
Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
const Expr *Callee = getCallee()->IgnoreParens();
if (const auto *MemExpr = dyn_cast<MemberExpr>(Callee))
@@ -559,9 +694,7 @@ CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
auto *E =
new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
@@ -573,9 +706,7 @@ CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
}
@@ -588,9 +719,7 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
auto *E =
new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
@@ -602,9 +731,7 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
}
@@ -649,9 +776,7 @@ CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
SourceLocation RParenLoc,
SourceRange AngleBrackets) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
auto *E =
new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
RParenLoc, AngleBrackets);
@@ -663,9 +788,7 @@ CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
CXXReinterpretCastExpr *
CXXReinterpretCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
}
@@ -688,9 +811,7 @@ CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
const CXXCastPath *BasePath,
SourceLocation L, SourceLocation R) {
unsigned PathSize = (BasePath ? BasePath->size() : 0);
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
auto *E =
new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
if (PathSize)
@@ -701,18 +822,52 @@ CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
- void *Buffer =
- C.Allocate(totalSizeToAlloc<CastExpr::BasePathSizeTy, CXXBaseSpecifier *>(
- PathSize ? 1 : 0, PathSize));
+ void *Buffer = C.Allocate(totalSizeToAlloc<CXXBaseSpecifier *>(PathSize));
return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
}
-SourceLocation CXXFunctionalCastExpr::getLocStart() const {
- return getTypeInfoAsWritten()->getTypeLoc().getLocStart();
+SourceLocation CXXFunctionalCastExpr::getBeginLoc() const {
+ return getTypeInfoAsWritten()->getTypeLoc().getBeginLoc();
+}
+
+SourceLocation CXXFunctionalCastExpr::getEndLoc() const {
+ return RParenLoc.isValid() ? RParenLoc : getSubExpr()->getEndLoc();
+}
+
+UserDefinedLiteral::UserDefinedLiteral(Expr *Fn, ArrayRef<Expr *> Args,
+ QualType Ty, ExprValueKind VK,
+ SourceLocation LitEndLoc,
+ SourceLocation SuffixLoc)
+ : CallExpr(UserDefinedLiteralClass, Fn, /*PreArgs=*/{}, Args, Ty, VK,
+ LitEndLoc, /*MinNumArgs=*/0, NotADL),
+ UDSuffixLoc(SuffixLoc) {}
+
+UserDefinedLiteral::UserDefinedLiteral(unsigned NumArgs, EmptyShell Empty)
+ : CallExpr(UserDefinedLiteralClass, /*NumPreArgs=*/0, NumArgs, Empty) {}
+
+UserDefinedLiteral *UserDefinedLiteral::Create(const ASTContext &Ctx, Expr *Fn,
+ ArrayRef<Expr *> Args,
+ QualType Ty, ExprValueKind VK,
+ SourceLocation LitEndLoc,
+ SourceLocation SuffixLoc) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned NumArgs = Args.size();
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(UserDefinedLiteral) + SizeOfTrailingObjects,
+ alignof(UserDefinedLiteral));
+ return new (Mem) UserDefinedLiteral(Fn, Args, Ty, VK, LitEndLoc, SuffixLoc);
}
-SourceLocation CXXFunctionalCastExpr::getLocEnd() const {
- return RParenLoc.isValid() ? RParenLoc : getSubExpr()->getLocEnd();
+UserDefinedLiteral *UserDefinedLiteral::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumArgs,
+ EmptyShell Empty) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/0, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(UserDefinedLiteral) + SizeOfTrailingObjects,
+ alignof(UserDefinedLiteral));
+ return new (Mem) UserDefinedLiteral(NumArgs, Empty);
}
UserDefinedLiteral::LiteralOperatorKind
@@ -749,14 +904,15 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
-CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &C, SourceLocation Loc,
- FieldDecl *Field, QualType T)
- : Expr(CXXDefaultInitExprClass, T.getNonLValueExprType(C),
- T->isLValueReferenceType() ? VK_LValue : T->isRValueReferenceType()
+CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
+ FieldDecl *Field, QualType Ty)
+ : Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
+ Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType()
? VK_XValue
: VK_RValue,
/*FIXME*/ OK_Ordinary, false, false, false, false),
- Field(Field), Loc(Loc) {
+ Field(Field) {
+ CXXDefaultInitExprBits.Loc = Loc;
assert(Field->hasInClassInitializer());
}
@@ -775,92 +931,118 @@ CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(const ASTContext &C,
return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
}
-CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(const ASTContext &C,
- CXXConstructorDecl *Cons,
- QualType Type,
- TypeSourceInfo *TSI,
- ArrayRef<Expr*> Args,
- SourceRange ParenOrBraceRange,
- bool HadMultipleCandidates,
- bool ListInitialization,
- bool StdInitListInitialization,
- bool ZeroInitialization)
- : CXXConstructExpr(C, CXXTemporaryObjectExprClass, Type,
- TSI->getTypeLoc().getBeginLoc(), Cons, false, Args,
- HadMultipleCandidates, ListInitialization,
- StdInitListInitialization, ZeroInitialization,
- CXXConstructExpr::CK_Complete, ParenOrBraceRange),
- Type(TSI) {}
-
-SourceLocation CXXTemporaryObjectExpr::getLocStart() const {
- return Type->getTypeLoc().getBeginLoc();
-}
-
-SourceLocation CXXTemporaryObjectExpr::getLocEnd() const {
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(
+ CXXConstructorDecl *Cons, QualType Ty, TypeSourceInfo *TSI,
+ ArrayRef<Expr *> Args, SourceRange ParenOrBraceRange,
+ bool HadMultipleCandidates, bool ListInitialization,
+ bool StdInitListInitialization, bool ZeroInitialization)
+ : CXXConstructExpr(
+ CXXTemporaryObjectExprClass, Ty, TSI->getTypeLoc().getBeginLoc(),
+ Cons, /* Elidable=*/false, Args, HadMultipleCandidates,
+ ListInitialization, StdInitListInitialization, ZeroInitialization,
+ CXXConstructExpr::CK_Complete, ParenOrBraceRange),
+ TSI(TSI) {}
+
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(EmptyShell Empty,
+ unsigned NumArgs)
+ : CXXConstructExpr(CXXTemporaryObjectExprClass, Empty, NumArgs) {}
+
+CXXTemporaryObjectExpr *CXXTemporaryObjectExpr::Create(
+ const ASTContext &Ctx, CXXConstructorDecl *Cons, QualType Ty,
+ TypeSourceInfo *TSI, ArrayRef<Expr *> Args, SourceRange ParenOrBraceRange,
+ bool HadMultipleCandidates, bool ListInitialization,
+ bool StdInitListInitialization, bool ZeroInitialization) {
+ unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(Args.size());
+ void *Mem =
+ Ctx.Allocate(sizeof(CXXTemporaryObjectExpr) + SizeOfTrailingObjects,
+ alignof(CXXTemporaryObjectExpr));
+ return new (Mem) CXXTemporaryObjectExpr(
+ Cons, Ty, TSI, Args, ParenOrBraceRange, HadMultipleCandidates,
+ ListInitialization, StdInitListInitialization, ZeroInitialization);
+}
+
+CXXTemporaryObjectExpr *
+CXXTemporaryObjectExpr::CreateEmpty(const ASTContext &Ctx, unsigned NumArgs) {
+ unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(NumArgs);
+ void *Mem =
+ Ctx.Allocate(sizeof(CXXTemporaryObjectExpr) + SizeOfTrailingObjects,
+ alignof(CXXTemporaryObjectExpr));
+ return new (Mem) CXXTemporaryObjectExpr(EmptyShell(), NumArgs);
+}
+
+SourceLocation CXXTemporaryObjectExpr::getBeginLoc() const {
+ return getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+}
+
+SourceLocation CXXTemporaryObjectExpr::getEndLoc() const {
SourceLocation Loc = getParenOrBraceRange().getEnd();
if (Loc.isInvalid() && getNumArgs())
- Loc = getArg(getNumArgs()-1)->getLocEnd();
+ Loc = getArg(getNumArgs() - 1)->getEndLoc();
return Loc;
}
-CXXConstructExpr *CXXConstructExpr::Create(const ASTContext &C, QualType T,
- SourceLocation Loc,
- CXXConstructorDecl *Ctor,
- bool Elidable,
- ArrayRef<Expr*> Args,
- bool HadMultipleCandidates,
- bool ListInitialization,
- bool StdInitListInitialization,
- bool ZeroInitialization,
- ConstructionKind ConstructKind,
- SourceRange ParenOrBraceRange) {
- return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc,
- Ctor, Elidable, Args,
- HadMultipleCandidates, ListInitialization,
- StdInitListInitialization,
- ZeroInitialization, ConstructKind,
- ParenOrBraceRange);
-}
-
-CXXConstructExpr::CXXConstructExpr(const ASTContext &C, StmtClass SC,
- QualType T, SourceLocation Loc,
- CXXConstructorDecl *Ctor,
- bool Elidable,
- ArrayRef<Expr*> Args,
- bool HadMultipleCandidates,
- bool ListInitialization,
- bool StdInitListInitialization,
- bool ZeroInitialization,
- ConstructionKind ConstructKind,
- SourceRange ParenOrBraceRange)
- : Expr(SC, T, VK_RValue, OK_Ordinary,
- T->isDependentType(), T->isDependentType(),
- T->isInstantiationDependentType(),
- T->containsUnexpandedParameterPack()),
- Constructor(Ctor), Loc(Loc), ParenOrBraceRange(ParenOrBraceRange),
- NumArgs(Args.size()), Elidable(Elidable),
- HadMultipleCandidates(HadMultipleCandidates),
- ListInitialization(ListInitialization),
- StdInitListInitialization(StdInitListInitialization),
- ZeroInitialization(ZeroInitialization), ConstructKind(ConstructKind) {
- if (NumArgs) {
- this->Args = new (C) Stmt*[Args.size()];
-
- for (unsigned i = 0; i != Args.size(); ++i) {
- assert(Args[i] && "NULL argument in CXXConstructExpr");
-
- if (Args[i]->isValueDependent())
- ExprBits.ValueDependent = true;
- if (Args[i]->isInstantiationDependent())
- ExprBits.InstantiationDependent = true;
- if (Args[i]->containsUnexpandedParameterPack())
- ExprBits.ContainsUnexpandedParameterPack = true;
+CXXConstructExpr *CXXConstructExpr::Create(
+ const ASTContext &Ctx, QualType Ty, SourceLocation Loc,
+ CXXConstructorDecl *Ctor, bool Elidable, ArrayRef<Expr *> Args,
+ bool HadMultipleCandidates, bool ListInitialization,
+ bool StdInitListInitialization, bool ZeroInitialization,
+ ConstructionKind ConstructKind, SourceRange ParenOrBraceRange) {
+ unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(Args.size());
+ void *Mem = Ctx.Allocate(sizeof(CXXConstructExpr) + SizeOfTrailingObjects,
+ alignof(CXXConstructExpr));
+ return new (Mem) CXXConstructExpr(
+ CXXConstructExprClass, Ty, Loc, Ctor, Elidable, Args,
+ HadMultipleCandidates, ListInitialization, StdInitListInitialization,
+ ZeroInitialization, ConstructKind, ParenOrBraceRange);
+}
+
+CXXConstructExpr *CXXConstructExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumArgs) {
+ unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CXXConstructExpr) + SizeOfTrailingObjects,
+ alignof(CXXConstructExpr));
+ return new (Mem)
+ CXXConstructExpr(CXXConstructExprClass, EmptyShell(), NumArgs);
+}
+
+CXXConstructExpr::CXXConstructExpr(
+ StmtClass SC, QualType Ty, SourceLocation Loc, CXXConstructorDecl *Ctor,
+ bool Elidable, ArrayRef<Expr *> Args, bool HadMultipleCandidates,
+ bool ListInitialization, bool StdInitListInitialization,
+ bool ZeroInitialization, ConstructionKind ConstructKind,
+ SourceRange ParenOrBraceRange)
+ : Expr(SC, Ty, VK_RValue, OK_Ordinary, Ty->isDependentType(),
+ Ty->isDependentType(), Ty->isInstantiationDependentType(),
+ Ty->containsUnexpandedParameterPack()),
+ Constructor(Ctor), ParenOrBraceRange(ParenOrBraceRange),
+ NumArgs(Args.size()) {
+ CXXConstructExprBits.Elidable = Elidable;
+ CXXConstructExprBits.HadMultipleCandidates = HadMultipleCandidates;
+ CXXConstructExprBits.ListInitialization = ListInitialization;
+ CXXConstructExprBits.StdInitListInitialization = StdInitListInitialization;
+ CXXConstructExprBits.ZeroInitialization = ZeroInitialization;
+ CXXConstructExprBits.ConstructionKind = ConstructKind;
+ CXXConstructExprBits.Loc = Loc;
+
+ Stmt **TrailingArgs = getTrailingArgs();
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ assert(Args[I] && "NULL argument in CXXConstructExpr!");
- this->Args[i] = Args[i];
- }
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ TrailingArgs[I] = Args[I];
}
}
+CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty,
+ unsigned NumArgs)
+ : Expr(SC, Empty), NumArgs(NumArgs) {}
+
LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
LambdaCaptureKind Kind, VarDecl *Var,
SourceLocation EllipsisLoc)
@@ -1044,12 +1226,7 @@ bool LambdaExpr::isMutable() const {
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
bool CleanupsHaveSideEffects,
ArrayRef<CleanupObject> objects)
- : Expr(ExprWithCleanupsClass, subexpr->getType(),
- subexpr->getValueKind(), subexpr->getObjectKind(),
- subexpr->isTypeDependent(), subexpr->isValueDependent(),
- subexpr->isInstantiationDependent(),
- subexpr->containsUnexpandedParameterPack()),
- SubExpr(subexpr) {
+ : FullExpr(ExprWithCleanupsClass, subexpr) {
ExprWithCleanupsBits.CleanupsHaveSideEffects = CleanupsHaveSideEffects;
ExprWithCleanupsBits.NumObjects = objects.size();
for (unsigned i = 0, e = objects.size(); i != e; ++i)
@@ -1066,7 +1243,7 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
}
ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
- : Expr(ExprWithCleanupsClass, empty) {
+ : FullExpr(ExprWithCleanupsClass, empty) {
ExprWithCleanupsBits.NumObjects = numObjects;
}
@@ -1078,22 +1255,22 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
return new (buffer) ExprWithCleanups(empty, numObjects);
}
-CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
- SourceLocation LParenLoc,
- ArrayRef<Expr*> Args,
- SourceLocation RParenLoc)
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *TSI,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc)
: Expr(CXXUnresolvedConstructExprClass,
- Type->getType().getNonReferenceType(),
- (Type->getType()->isLValueReferenceType()
+ TSI->getType().getNonReferenceType(),
+ (TSI->getType()->isLValueReferenceType()
? VK_LValue
- : Type->getType()->isRValueReferenceType() ? VK_XValue
- : VK_RValue),
+ : TSI->getType()->isRValueReferenceType() ? VK_XValue
+ : VK_RValue),
OK_Ordinary,
- Type->getType()->isDependentType() ||
- Type->getType()->getContainedDeducedType(),
- true, true, Type->getType()->containsUnexpandedParameterPack()),
- Type(Type), LParenLoc(LParenLoc), RParenLoc(RParenLoc),
- NumArgs(Args.size()) {
+ TSI->getType()->isDependentType() ||
+ TSI->getType()->getContainedDeducedType(),
+ true, true, TSI->getType()->containsUnexpandedParameterPack()),
+ TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ CXXUnresolvedConstructExprBits.NumArgs = Args.size();
auto **StoredArgs = getTrailingObjects<Expr *>();
for (unsigned I = 0; I != Args.size(); ++I) {
if (Args[I]->containsUnexpandedParameterPack())
@@ -1103,46 +1280,45 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
}
}
-CXXUnresolvedConstructExpr *
-CXXUnresolvedConstructExpr::Create(const ASTContext &C,
- TypeSourceInfo *Type,
- SourceLocation LParenLoc,
- ArrayRef<Expr*> Args,
- SourceLocation RParenLoc) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
- return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
+CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
+ const ASTContext &Context, TypeSourceInfo *TSI, SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args, SourceLocation RParenLoc) {
+ void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
+ return new (Mem) CXXUnresolvedConstructExpr(TSI, LParenLoc, Args, RParenLoc);
}
CXXUnresolvedConstructExpr *
-CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) {
- Stmt::EmptyShell Empty;
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumArgs));
- return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &Context,
+ unsigned NumArgs) {
+ void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(NumArgs));
+ return new (Mem) CXXUnresolvedConstructExpr(EmptyShell(), NumArgs);
}
-SourceLocation CXXUnresolvedConstructExpr::getLocStart() const {
- return Type->getTypeLoc().getBeginLoc();
+SourceLocation CXXUnresolvedConstructExpr::getBeginLoc() const {
+ return TSI->getTypeLoc().getBeginLoc();
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
- const ASTContext &C, Expr *Base, QualType BaseType, bool IsArrow,
+ const ASTContext &Ctx, Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
- : Expr(CXXDependentScopeMemberExprClass, C.DependentTy, VK_LValue,
+ : Expr(CXXDependentScopeMemberExprClass, Ctx.DependentTy, VK_LValue,
OK_Ordinary, true, true, true,
((Base && Base->containsUnexpandedParameterPack()) ||
- (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()) ||
+ (QualifierLoc && QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
MemberNameInfo.containsUnexpandedParameterPack())),
- Base(Base), BaseType(BaseType), IsArrow(IsArrow),
- HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
- TemplateKWLoc.isValid()),
- OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
- FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ Base(Base), BaseType(BaseType), QualifierLoc(QualifierLoc),
MemberNameInfo(MemberNameInfo) {
+ CXXDependentScopeMemberExprBits.IsArrow = IsArrow;
+ CXXDependentScopeMemberExprBits.HasTemplateKWAndArgsInfo =
+ (TemplateArgs != nullptr) || TemplateKWLoc.isValid();
+ CXXDependentScopeMemberExprBits.HasFirstQualifierFoundInScope =
+ FirstQualifierFoundInScope != nullptr;
+ CXXDependentScopeMemberExprBits.OperatorLoc = OperatorLoc;
+
if (TemplateArgs) {
bool Dependent = true;
bool InstantiationDependent = true;
@@ -1156,56 +1332,54 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+
+ if (hasFirstQualifierFoundInScope())
+ *getTrailingObjects<NamedDecl *>() = FirstQualifierFoundInScope;
}
-CXXDependentScopeMemberExpr *
-CXXDependentScopeMemberExpr::Create(const ASTContext &C,
- Expr *Base, QualType BaseType, bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- NamedDecl *FirstQualifierFoundInScope,
- DeclarationNameInfo MemberNameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
- bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
+ EmptyShell Empty, bool HasTemplateKWAndArgsInfo,
+ bool HasFirstQualifierFoundInScope)
+ : Expr(CXXDependentScopeMemberExprClass, Empty) {
+ CXXDependentScopeMemberExprBits.HasTemplateKWAndArgsInfo =
+ HasTemplateKWAndArgsInfo;
+ CXXDependentScopeMemberExprBits.HasFirstQualifierFoundInScope =
+ HasFirstQualifierFoundInScope;
+}
+
+CXXDependentScopeMemberExpr *CXXDependentScopeMemberExpr::Create(
+ const ASTContext &Ctx, Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ bool HasTemplateKWAndArgsInfo =
+ (TemplateArgs != nullptr) || TemplateKWLoc.isValid();
unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
- HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ bool HasFirstQualifierFoundInScope = FirstQualifierFoundInScope != nullptr;
- void *Mem = C.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
- return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
- IsArrow, OperatorLoc,
- QualifierLoc,
- TemplateKWLoc,
- FirstQualifierFoundInScope,
- MemberNameInfo, TemplateArgs);
+ unsigned Size = totalSizeToAlloc<ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc, NamedDecl *>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs, HasFirstQualifierFoundInScope);
+
+ void *Mem = Ctx.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
+ return new (Mem) CXXDependentScopeMemberExpr(
+ Ctx, Base, BaseType, IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
+ FirstQualifierFoundInScope, MemberNameInfo, TemplateArgs);
}
-CXXDependentScopeMemberExpr *
-CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
- bool HasTemplateKWAndArgsInfo,
- unsigned NumTemplateArgs) {
+CXXDependentScopeMemberExpr *CXXDependentScopeMemberExpr::CreateEmpty(
+ const ASTContext &Ctx, bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs, bool HasFirstQualifierFoundInScope) {
assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
- HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
- auto *E =
- new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
- false, SourceLocation(),
- NestedNameSpecifierLoc(),
- SourceLocation(), nullptr,
- DeclarationNameInfo(), nullptr);
- E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
- return E;
-}
-bool CXXDependentScopeMemberExpr::isImplicitAccess() const {
- if (!Base)
- return true;
+ unsigned Size = totalSizeToAlloc<ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc, NamedDecl *>(
+ HasTemplateKWAndArgsInfo, NumTemplateArgs, HasFirstQualifierFoundInScope);
- return cast<Expr>(Base)->isImplicitCXXThis();
+ void *Mem = Ctx.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
+ return new (Mem) CXXDependentScopeMemberExpr(
+ EmptyShell(), HasTemplateKWAndArgsInfo, HasFirstQualifierFoundInScope);
}
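
The rewritten Create/CreateEmpty pair packs the optional FirstQualifierFoundInScope pointer into trailing storage instead of a dedicated member, so nodes that do not carry it do not pay for it. A minimal sketch of the same llvm::TrailingObjects pattern, using a hypothetical Node class rather than the real AST node:

    #include "clang/AST/ASTContext.h"
    #include "llvm/Support/TrailingObjects.h"
    #include <new>

    using namespace clang;

    // Hypothetical node that optionally stores one NamedDecl* after itself.
    class Node final : private llvm::TrailingObjects<Node, NamedDecl *> {
      friend TrailingObjects;
      bool HasQualifier;

      // TrailingObjects asks for the number of objects in each trailing array.
      size_t numTrailingObjects(OverloadToken<NamedDecl *>) const {
        return HasQualifier ? 1 : 0;
      }

      explicit Node(bool HasQualifier) : HasQualifier(HasQualifier) {}

    public:
      static Node *Create(const ASTContext &Ctx, NamedDecl *Qualifier) {
        bool HasQualifier = Qualifier != nullptr;
        // One allocation covers the object plus its optional trailing slot.
        void *Mem = Ctx.Allocate(totalSizeToAlloc<NamedDecl *>(HasQualifier),
                                 alignof(Node));
        Node *N = new (Mem) Node(HasQualifier);
        if (HasQualifier)
          *N->getTrailingObjects<NamedDecl *>() = Qualifier;
        return N;
      }

      NamedDecl *getQualifier() const {
        return HasQualifier ? *getTrailingObjects<NamedDecl *>() : nullptr;
      }
    };
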
static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
@@ -1225,19 +1399,15 @@ static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
return true;
}
-UnresolvedMemberExpr::UnresolvedMemberExpr(const ASTContext &C,
- bool HasUnresolvedUsing,
- Expr *Base, QualType BaseType,
- bool IsArrow,
- SourceLocation OperatorLoc,
- NestedNameSpecifierLoc QualifierLoc,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &MemberNameInfo,
- const TemplateArgumentListInfo *TemplateArgs,
- UnresolvedSetIterator Begin,
- UnresolvedSetIterator End)
+UnresolvedMemberExpr::UnresolvedMemberExpr(
+ const ASTContext &Context, bool HasUnresolvedUsing, Expr *Base,
+ QualType BaseType, bool IsArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
: OverloadExpr(
- UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
+ UnresolvedMemberExprClass, Context, QualifierLoc, TemplateKWLoc,
MemberNameInfo, TemplateArgs, Begin, End,
// Dependent
((Base && Base->isTypeDependent()) || BaseType->isDependentType()),
@@ -1246,14 +1416,22 @@ UnresolvedMemberExpr::UnresolvedMemberExpr(const ASTContext &C,
// Contains unexpanded parameter pack
((Base && Base->containsUnexpandedParameterPack()) ||
BaseType->containsUnexpandedParameterPack())),
- IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing), Base(Base),
- BaseType(BaseType), OperatorLoc(OperatorLoc) {
+ Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
+ UnresolvedMemberExprBits.IsArrow = IsArrow;
+ UnresolvedMemberExprBits.HasUnresolvedUsing = HasUnresolvedUsing;
+
// Check whether all of the members are non-static member functions,
// and if so, give this bound-member type instead of overload type.
if (hasOnlyNonStaticMemberFunctions(Begin, End))
- setType(C.BoundMemberTy);
+ setType(Context.BoundMemberTy);
}
+UnresolvedMemberExpr::UnresolvedMemberExpr(EmptyShell Empty,
+ unsigned NumResults,
+ bool HasTemplateKWAndArgsInfo)
+ : OverloadExpr(UnresolvedMemberExprClass, Empty, NumResults,
+ HasTemplateKWAndArgsInfo) {}
+
bool UnresolvedMemberExpr::isImplicitAccess() const {
if (!Base)
return true;
@@ -1262,39 +1440,37 @@ bool UnresolvedMemberExpr::isImplicitAccess() const {
}
UnresolvedMemberExpr *UnresolvedMemberExpr::Create(
- const ASTContext &C, bool HasUnresolvedUsing, Expr *Base, QualType BaseType,
- bool IsArrow, SourceLocation OperatorLoc,
+ const ASTContext &Context, bool HasUnresolvedUsing, Expr *Base,
+ QualType BaseType, bool IsArrow, SourceLocation OperatorLoc,
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
UnresolvedSetIterator End) {
+ unsigned NumResults = End - Begin;
bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid();
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
- HasTemplateKWAndArgsInfo, TemplateArgs ? TemplateArgs->size() : 0);
-
- void *Mem = C.Allocate(Size, alignof(UnresolvedMemberExpr));
+ unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
+ unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(
+ NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, alignof(UnresolvedMemberExpr));
return new (Mem) UnresolvedMemberExpr(
- C, HasUnresolvedUsing, Base, BaseType, IsArrow, OperatorLoc, QualifierLoc,
- TemplateKWLoc, MemberNameInfo, TemplateArgs, Begin, End);
+ Context, HasUnresolvedUsing, Base, BaseType, IsArrow, OperatorLoc,
+ QualifierLoc, TemplateKWLoc, MemberNameInfo, TemplateArgs, Begin, End);
}
-UnresolvedMemberExpr *
-UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
- bool HasTemplateKWAndArgsInfo,
- unsigned NumTemplateArgs) {
+UnresolvedMemberExpr *UnresolvedMemberExpr::CreateEmpty(
+ const ASTContext &Context, unsigned NumResults,
+ bool HasTemplateKWAndArgsInfo, unsigned NumTemplateArgs) {
assert(NumTemplateArgs == 0 || HasTemplateKWAndArgsInfo);
- std::size_t Size =
- totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
- HasTemplateKWAndArgsInfo, NumTemplateArgs);
-
- void *Mem = C.Allocate(Size, alignof(UnresolvedMemberExpr));
- auto *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
- E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
- return E;
+ unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
+ TemplateArgumentLoc>(
+ NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
+ void *Mem = Context.Allocate(Size, alignof(UnresolvedMemberExpr));
+ return new (Mem)
+ UnresolvedMemberExpr(EmptyShell(), NumResults, HasTemplateKWAndArgsInfo);
}
-CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
+CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() {
// Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
// If there was a nested name specifier, it names the naming class.
@@ -1448,4 +1624,37 @@ TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
return new (Mem) TypeTraitExpr(EmptyShell());
}
-void ArrayTypeTraitExpr::anchor() {}
+CUDAKernelCallExpr::CUDAKernelCallExpr(Expr *Fn, CallExpr *Config,
+ ArrayRef<Expr *> Args, QualType Ty,
+ ExprValueKind VK, SourceLocation RP,
+ unsigned MinNumArgs)
+ : CallExpr(CUDAKernelCallExprClass, Fn, /*PreArgs=*/Config, Args, Ty, VK,
+ RP, MinNumArgs, NotADL) {}
+
+CUDAKernelCallExpr::CUDAKernelCallExpr(unsigned NumArgs, EmptyShell Empty)
+ : CallExpr(CUDAKernelCallExprClass, /*NumPreArgs=*/END_PREARG, NumArgs,
+ Empty) {}
+
+CUDAKernelCallExpr *
+CUDAKernelCallExpr::Create(const ASTContext &Ctx, Expr *Fn, CallExpr *Config,
+ ArrayRef<Expr *> Args, QualType Ty, ExprValueKind VK,
+ SourceLocation RP, unsigned MinNumArgs) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned NumArgs = std::max<unsigned>(Args.size(), MinNumArgs);
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/END_PREARG, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CUDAKernelCallExpr) + SizeOfTrailingObjects,
+ alignof(CUDAKernelCallExpr));
+ return new (Mem) CUDAKernelCallExpr(Fn, Config, Args, Ty, VK, RP, MinNumArgs);
+}
+
+CUDAKernelCallExpr *CUDAKernelCallExpr::CreateEmpty(const ASTContext &Ctx,
+ unsigned NumArgs,
+ EmptyShell Empty) {
+ // Allocate storage for the trailing objects of CallExpr.
+ unsigned SizeOfTrailingObjects =
+ CallExpr::sizeOfTrailingObjects(/*NumPreArgs=*/END_PREARG, NumArgs);
+ void *Mem = Ctx.Allocate(sizeof(CUDAKernelCallExpr) + SizeOfTrailingObjects,
+ alignof(CUDAKernelCallExpr));
+ return new (Mem) CUDAKernelCallExpr(NumArgs, Empty);
+}
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index e50dd9c79d11..e1d6a1c9edcc 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -194,6 +194,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::DesignatedInitUpdateExprClass:
return Cl::CL_PRValue;
+ case Expr::ConstantExprClass:
+ return ClassifyInternal(Ctx, cast<ConstantExpr>(E)->getSubExpr());
+
// Next come the complicated cases.
case Expr::SubstNonTypeTemplateParmExprClass:
return ClassifyInternal(Ctx,
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 44cf75dbd25b..da093ff22c12 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -39,11 +39,13 @@
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
@@ -143,8 +145,8 @@ namespace {
// If we're doing a variable assignment from e.g. malloc(N), there will
// probably be a cast of some kind. In exotic cases, we might also see a
// top-level ExprWithCleanups. Ignore them either way.
- if (const auto *EC = dyn_cast<ExprWithCleanups>(E))
- E = EC->getSubExpr()->IgnoreParens();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr()->IgnoreParens();
if (const auto *Cast = dyn_cast<CastExpr>(E))
E = Cast->getSubExpr()->IgnoreParens();
@@ -350,6 +352,7 @@ namespace {
/// Get the type of the designated object.
QualType getType(ASTContext &Ctx) const {
+ assert(!Invalid && "invalid designator has no subobject type");
return MostDerivedPathLength == Entries.size()
? MostDerivedType
: Ctx.getRecordType(getAsBaseClass(Entries.back()));
@@ -504,7 +507,7 @@ namespace {
}
// FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
- // on the overall stack usage of deeply-recursing constexpr evaluataions.
+ // on the overall stack usage of deeply-recursing constexpr evaluations.
// (We should cache this map rather than recomputing it repeatedly.)
// But let's try this and see how it goes; we can look into caching the map
// as a later change.
@@ -719,6 +722,10 @@ namespace {
/// Whether or not we're currently speculatively evaluating.
bool IsSpeculativelyEvaluating;
+ /// Whether or not we're in a context where the front end requires a
+ /// constant value.
+ bool InConstantContext;
+
enum EvaluationMode {
/// Evaluate as a constant expression. Stop if we find that the expression
/// is not a constant expression.
@@ -758,18 +765,6 @@ namespace {
/// context we try to fold them immediately since the optimizer never
/// gets a chance to look at it.
EM_PotentialConstantExpressionUnevaluated,
-
- /// Evaluate as a constant expression. In certain scenarios, if:
- /// - we find a MemberExpr with a base that can't be evaluated, or
- /// - we find a variable initialized with a call to a function that has
- /// the alloc_size attribute on it
- /// then we may consider evaluation to have succeeded.
- ///
- /// In either case, the LValue returned shall have an invalid base; in the
- /// former, the base will be the invalid MemberExpr, in the latter, the
- /// base will be either the alloc_size CallExpr or a CastExpr wrapping
- /// said CallExpr.
- EM_OffsetFold,
} EvalMode;
/// Are we checking whether the expression is a potential constant
@@ -792,7 +787,7 @@ namespace {
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
HasFoldFailureDiagnostic(false), IsSpeculativelyEvaluating(false),
- EvalMode(Mode) {}
+ InConstantContext(false), EvalMode(Mode) {}
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value) {
EvaluatingDecl = Base;
@@ -831,7 +826,7 @@ namespace {
bool nextStep(const Stmt *S) {
if (!StepsLeft) {
- FFDiag(S->getLocStart(), diag::note_constexpr_step_limit_exceeded);
+ FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
return false;
}
--StepsLeft;
@@ -873,7 +868,6 @@ namespace {
case EM_PotentialConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_PotentialConstantExpressionUnevaluated:
- case EM_OffsetFold:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -965,7 +959,6 @@ namespace {
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
- case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -984,7 +977,6 @@ namespace {
case EM_EvaluateForOverflow:
case EM_IgnoreSideEffects:
case EM_ConstantFold:
- case EM_OffsetFold:
return true;
case EM_PotentialConstantExpression:
@@ -1020,7 +1012,6 @@ namespace {
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
- case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -1092,18 +1083,18 @@ namespace {
}
};
- /// RAII object used to treat the current evaluation as the correct pointer
- /// offset fold for the current EvalMode
- struct FoldOffsetRAII {
+ /// RAII object used to set the current evaluation mode to ignore
+ /// side-effects.
+ struct IgnoreSideEffectsRAII {
EvalInfo &Info;
EvalInfo::EvaluationMode OldMode;
- explicit FoldOffsetRAII(EvalInfo &Info)
+ explicit IgnoreSideEffectsRAII(EvalInfo &Info)
: Info(Info), OldMode(Info.EvalMode) {
if (!Info.checkingPotentialConstantExpression())
- Info.EvalMode = EvalInfo::EM_OffsetFold;
+ Info.EvalMode = EvalInfo::EM_IgnoreSideEffects;
}
- ~FoldOffsetRAII() { Info.EvalMode = OldMode; }
+ ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
};
/// RAII object used to optionally suppress diagnostics and side-effects from
@@ -1299,6 +1290,14 @@ void EvalInfo::addCallStack(unsigned Limit) {
}
}
+/// Kinds of access we can perform on an object, for diagnostics.
+enum AccessKinds {
+ AK_Read,
+ AK_Assign,
+ AK_Increment,
+ AK_Decrement
+};
+
namespace {
struct ComplexValue {
private:
@@ -1404,21 +1403,36 @@ namespace {
set(B, true);
}
+ private:
// Check that this LValue is not based on a null pointer. If it is, produce
// a diagnostic and mark the designator as invalid.
- bool checkNullPointer(EvalInfo &Info, const Expr *E,
- CheckSubobjectKind CSK) {
+ template <typename GenDiagType>
+ bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
if (Designator.Invalid)
return false;
if (IsNullPtr) {
- Info.CCEDiag(E, diag::note_constexpr_null_subobject)
- << CSK;
+ GenDiag();
Designator.setInvalid();
return false;
}
return true;
}
+ public:
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ return checkNullPointerDiagnosingWith([&Info, E, CSK] {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
+ });
+ }
+
+ bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
+ AccessKinds AK) {
+ return checkNullPointerDiagnosingWith([&Info, E, AK] {
+ Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
+ });
+ }
+
// Check this LValue refers to an object. If not, set the designator to be
// invalid and emit a diagnostic.
bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
@@ -2088,11 +2102,12 @@ static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
QualType DestType, QualType SrcType,
const APSInt &Value) {
unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
- APSInt Result = Value;
// Figure out if this is a truncate, extend or noop cast.
// If the input is signed, do a sign extend, noop, or truncate.
- Result = Result.extOrTrunc(DestWidth);
+ APSInt Result = Value.extOrTrunc(DestWidth);
Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
+ if (DestType->isBooleanType())
+ Result = Value.getBoolValue();
return Result;
}
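
The new branch exists because bool has an integer width of 1, and a plain truncation keeps only the low bit instead of applying the usual nonzero-becomes-true conversion. A small illustration with llvm::APSInt (values and widths chosen only for the example):

    #include "llvm/ADT/APSInt.h"

    void boolCastExample() {
      llvm::APSInt Two(llvm::APInt(32, 2), /*isUnsigned=*/false);
      llvm::APSInt Truncated = Two.extOrTrunc(1); // keeps bit 0 only -> 0, the old (wrong) result
      bool AsBool = Two.getBoolValue();           // nonzero -> true, what the new branch produces
      (void)Truncated;
      (void)AsBool;
    }
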
@@ -2553,8 +2568,8 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
if (Info.checkingPotentialConstantExpression())
return false;
// FIXME: implement capture evaluation during constant expr evaluation.
- Info.FFDiag(E->getLocStart(),
- diag::note_unimplemented_constexpr_lambda_feature_ast)
+ Info.FFDiag(E->getBeginLoc(),
+ diag::note_unimplemented_constexpr_lambda_feature_ast)
<< "captures not currently allowed";
return false;
}
@@ -2754,14 +2769,6 @@ static bool diagnoseUnreadableFields(EvalInfo &Info, const Expr *E,
return false;
}
-/// Kinds of access we can perform on an object, for diagnostics.
-enum AccessKinds {
- AK_Read,
- AK_Assign,
- AK_Increment,
- AK_Decrement
-};
-
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
@@ -3432,19 +3439,31 @@ struct CompoundAssignSubobjectHandler {
if (!checkConst(SubobjType))
return false;
- if (!SubobjType->isIntegerType() || !RHS.isInt()) {
+ if (!SubobjType->isIntegerType()) {
// We don't support compound assignment on integer-cast-to-pointer
// values.
Info.FFDiag(E);
return false;
}
- APSInt LHS = HandleIntToIntCast(Info, E, PromotedLHSType,
- SubobjType, Value);
- if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS))
- return false;
- Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
- return true;
+ if (RHS.isInt()) {
+ APSInt LHS =
+ HandleIntToIntCast(Info, E, PromotedLHSType, SubobjType, Value);
+ if (!handleIntIntBinOp(Info, E, LHS, Opcode, RHS.getInt(), LHS))
+ return false;
+ Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
+ return true;
+ } else if (RHS.isFloat()) {
+ APFloat FValue(0.0);
+ return HandleIntToFloatCast(Info, E, SubobjType, Value, PromotedLHSType,
+ FValue) &&
+ handleFloatFloatBinOp(Info, E, FValue, Opcode, RHS.getFloat()) &&
+ HandleFloatToIntCast(Info, E, PromotedLHSType, FValue, SubobjType,
+ Value);
+ }
+
+ Info.FFDiag(E);
+ return false;
}
bool found(APFloat &Value, QualType SubobjType) {
return checkConst(SubobjType) &&
@@ -3844,8 +3863,8 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
const Expr *InitE = VD->getInit();
if (!InitE) {
- Info.FFDiag(VD->getLocStart(), diag::note_constexpr_uninitialized)
- << false << VD->getType();
+ Info.FFDiag(VD->getBeginLoc(), diag::note_constexpr_uninitialized)
+ << false << VD->getType();
Val = APValue();
return false;
}
@@ -3990,7 +4009,8 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
case ESR_CaseNotFound:
// This can only happen if the switch case is nested within a statement
// expression. We have no intention of supporting that.
- Info.FFDiag(Found->getLocStart(), diag::note_constexpr_stmt_expr_unsupported);
+ Info.FFDiag(Found->getBeginLoc(),
+ diag::note_constexpr_stmt_expr_unsupported);
return ESR_Failed;
}
llvm_unreachable("Invalid EvalStmtResult!");
@@ -4081,7 +4101,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR_Succeeded;
}
- Info.FFDiag(S->getLocStart());
+ Info.FFDiag(S->getBeginLoc());
return ESR_Failed;
case Stmt::NullStmtClass:
@@ -4215,6 +4235,13 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const CXXForRangeStmt *FS = cast<CXXForRangeStmt>(S);
BlockScopeRAII Scope(Info);
+ // Evaluate the init-statement if present.
+ if (FS->getInit()) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getInit());
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+
// Initialize the __range variable.
EvalStmtResult ESR = EvaluateStmt(Result, Info, FS->getRangeStmt());
if (ESR != ESR_Succeeded)
@@ -4279,6 +4306,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
return EvaluateStmt(Result, Info, cast<SwitchCase>(S)->getSubStmt(), Case);
+ case Stmt::CXXTryStmtClass:
+ // Evaluate try blocks by evaluating all sub statements.
+ return EvaluateStmt(Result, Info, cast<CXXTryStmt>(S)->getTryBlock(), Case);
}
}
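
Both additions track newer language features: C++2a allows an init-statement in a range-based for and permits try-blocks in constexpr functions (the evaluator simply runs the try compound statement). A hedged example of code these paths let the evaluator step through, assuming a compiler in C++2a mode:

    // Range-based for with an init-statement, inside a constexpr function.
    constexpr int sumFirstThree() {
      int Sum = 0;
      for (int Arr[] = {1, 2, 3}; int V : Arr)
        Sum += V;
      return Sum;
    }
    static_assert(sumFirstThree() == 6, "init-statement evaluated first");

    // A try-block no longer blocks constant evaluation; only the try block runs.
    constexpr int guarded(int X) {
      try {
        return X + 1;
      } catch (...) {
        return -1;
      }
    }
    static_assert(guarded(41) == 42, "try block evaluated like a compound statement");
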
@@ -4321,10 +4351,13 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
Declaration->isConstexpr())
return false;
- // Bail out with no diagnostic if the function declaration itself is invalid.
- // We will have produced a relevant diagnostic while parsing it.
- if (Declaration->isInvalidDecl())
+ // Bail out if the function declaration itself is invalid. We will
+ // have produced a relevant diagnostic while parsing it, so just
+ // note the problematic sub-expression.
+ if (Declaration->isInvalidDecl()) {
+ Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
return false;
+ }
// Can we evaluate this function call?
if (Definition && Definition->isConstexpr() &&
@@ -4429,7 +4462,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (!handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
RHS, RHSValue))
return false;
- if (!handleAssignment(Info, Args[0], *This, MD->getThisType(Info.Ctx),
+ if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
RHSValue))
return false;
This->moveInto(Result);
@@ -4451,7 +4484,7 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (ESR == ESR_Succeeded) {
if (Callee->getReturnType()->isVoidType())
return true;
- Info.FFDiag(Callee->getLocEnd(), diag::note_constexpr_no_return);
+ Info.FFDiag(Callee->getEndLoc(), diag::note_constexpr_no_return);
}
return ESR == ESR_Returned;
}
@@ -4722,6 +4755,8 @@ public:
return Error(E);
}
+ bool VisitConstantExpr(const ConstantExpr *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitParenExpr(const ParenExpr *E)
{ return StmtVisitorTy::Visit(E->getSubExpr()); }
bool VisitUnaryExtension(const UnaryOperator *E)
@@ -5076,8 +5111,8 @@ public:
if (BI + 1 == BE) {
const Expr *FinalExpr = dyn_cast<Expr>(*BI);
if (!FinalExpr) {
- Info.FFDiag((*BI)->getLocStart(),
- diag::note_constexpr_stmt_expr_unsupported);
+ Info.FFDiag((*BI)->getBeginLoc(),
+ diag::note_constexpr_stmt_expr_unsupported);
return false;
}
return this->Visit(FinalExpr);
@@ -5091,8 +5126,8 @@ public:
// 'break', or 'continue', it would be nice to propagate that to
// the outer statement evaluation rather than bailing out.
if (ESR != ESR_Failed)
- Info.FFDiag((*BI)->getLocStart(),
- diag::note_constexpr_stmt_expr_unsupported);
+ Info.FFDiag((*BI)->getBeginLoc(),
+ diag::note_constexpr_stmt_expr_unsupported);
return false;
}
}
@@ -5625,8 +5660,10 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
return false;
auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
- if (!E->EvaluateAsInt(Into, Ctx, Expr::SE_AllowSideEffects))
+ Expr::EvalResult ExprResult;
+ if (!E->EvaluateAsInt(ExprResult, Ctx, Expr::SE_AllowSideEffects))
return false;
+ Into = ExprResult.Val.getInt();
if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
return false;
Into = Into.zextOrSelf(BitsInSizeT);
@@ -5852,11 +5889,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
- // If we changed anything other than cvr-qualifiers, we can't use this
- // value for constant folding. FIXME: Qualification conversions should
- // always be CK_NoOp, but we get this wrong in C.
- if (!Info.Ctx.hasCvrSimilarType(E->getType(), E->getSubExpr()->getType()))
- Result.Designator.setInvalid();
+ Result.Designator.setInvalid();
if (SubExpr->getType()->isVoidPointerType())
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< 3 << SubExpr->getType();
@@ -5954,21 +5987,35 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
-static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
+static CharUnits GetAlignOfType(EvalInfo &Info, QualType T,
+ UnaryExprOrTypeTrait ExprKind) {
// C++ [expr.alignof]p3:
// When alignof is applied to a reference type, the result is the
// alignment of the referenced type.
if (const ReferenceType *Ref = T->getAs<ReferenceType>())
T = Ref->getPointeeType();
- // __alignof is defined to return the preferred alignment.
if (T.getQualifiers().hasUnaligned())
return CharUnits::One();
- return Info.Ctx.toCharUnitsFromBits(
- Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+
+ const bool AlignOfReturnsPreferred =
+ Info.Ctx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
+
+ // __alignof is defined to return the preferred alignment.
+ // Before 8, clang returned the preferred alignment for alignof and _Alignof
+ // as well.
+ if (ExprKind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+ // alignof and _Alignof are defined to return the ABI alignment.
+ else if (ExprKind == UETT_AlignOf)
+ return Info.Ctx.getTypeAlignInChars(T.getTypePtr());
+ else
+ llvm_unreachable("GetAlignOfType on a non-alignment ExprKind");
}
-static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
+static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E,
+ UnaryExprOrTypeTrait ExprKind) {
E = E->IgnoreParens();
// The kinds of expressions that we have special-case logic here for
@@ -5985,7 +6032,7 @@ static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
/*RefAsPointee*/true);
- return GetAlignOfType(Info, E->getType());
+ return GetAlignOfType(Info, E->getType(), ExprKind);
}
// To be clear: this happily visits unsupported builtins. Better name welcomed.
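
The split is observable on targets whose preferred alignment for a type exceeds its ABI alignment. Assuming a 32-bit x86 System V target and the post-Clang-7 ABI behavior selected above:

    // alignof / _Alignof now report the ABI alignment ...
    static_assert(alignof(double) == 4, "ABI alignment of double on i386 SysV");
    // ... while __alignof__ keeps reporting the preferred alignment.
    static_assert(__alignof__(double) == 8, "preferred alignment of double on i386 SysV");
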
@@ -6046,8 +6093,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
OffsetResult.Base.dyn_cast<const ValueDecl*>()) {
BaseAlignment = Info.Ctx.getDeclAlign(VD);
} else {
- BaseAlignment =
- GetAlignOfExpr(Info, OffsetResult.Base.get<const Expr*>());
+ BaseAlignment = GetAlignOfExpr(
+ Info, OffsetResult.Base.get<const Expr *>(), UETT_AlignOf);
}
if (BaseAlignment < Align) {
@@ -6077,7 +6124,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return true;
}
-
+ case Builtin::BI__builtin_launder:
+ return evaluatePointer(E->getArg(0), Result);
case Builtin::BIstrchr:
case Builtin::BIwcschr:
case Builtin::BImemchr:
@@ -6109,9 +6157,27 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
MaxLength = N.getExtValue();
}
-
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
-
+ // We cannot find the value if there are no candidates to match against.
+ if (MaxLength == 0u)
+ return ZeroInitialization(E);
+ if (!Result.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ Result.Designator.Invalid)
+ return false;
+ QualType CharTy = Result.Designator.getType(Info.Ctx);
+ bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
+ BuiltinOp == Builtin::BI__builtin_memchr;
+ assert(IsRawByte ||
+ Info.Ctx.hasSameUnqualifiedType(
+ CharTy, E->getArg(0)->getType()->getPointeeType()));
+ // Pointers to const void may point to objects of incomplete type.
+ if (IsRawByte && CharTy->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy;
+ return false;
+ }
+ // Give up on byte-oriented matching against multibyte elements.
+ // FIXME: We can compare the bytes in the correct order.
+ if (IsRawByte && Info.Ctx.getTypeSizeInChars(CharTy) != CharUnits::One())
+ return false;
// Figure out what value we're actually looking for (after converting to
// the corresponding unsigned type if necessary).
uint64_t DesiredVal;
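
Restated in plain C++, the new guards mean the search only proceeds when the length is nonzero, the pointer is non-null with a valid designator, the element type is complete, and (for the raw-byte builtins) each element is exactly one byte. A rough standalone paraphrase of the byte scan, not the evaluator's own code:

    #include <cstddef>

    // Sketch of the memchr-style fold, assuming byte-sized elements.
    constexpr const unsigned char *findByte(const unsigned char *P,
                                            unsigned char Desired,
                                            std::size_t N) {
      if (N == 0 || P == nullptr) // no candidates, or a (diagnosed) null access
        return nullptr;
      for (std::size_t I = 0; I != N; ++I)
        if (P[I] == Desired)      // bytes are compared as unsigned values
          return P + I;
      return nullptr;
    }
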
@@ -6207,6 +6273,20 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!N)
return true;
+ // Otherwise, if either of the operands is null, we can't proceed. Don't
+ // try to determine the type of the copied objects, because there aren't
+ // any.
+ if (!Src.Base || !Dest.Base) {
+ APValue Val;
+ (!Src.Base ? Src : Dest).moveInto(Val);
+ Info.FFDiag(E, diag::note_constexpr_memcpy_null)
+ << Move << WChar << !!Src.Base
+ << Val.getAsString(Info.Ctx, E->getArg(0)->getType());
+ return false;
+ }
+ if (Src.Designator.Invalid || Dest.Designator.Invalid)
+ return false;
+
// We require that Src and Dest are both pointers to arrays of
// trivially-copyable type. (For the wide version, the designator will be
// invalid if the designated object is not a wchar_t.)
@@ -6216,6 +6296,10 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Info.FFDiag(E, diag::note_constexpr_memcpy_type_pun) << Move << SrcT << T;
return false;
}
+ if (T->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_memcpy_incomplete_type) << Move << T;
+ return false;
+ }
if (!T.isTriviallyCopyableType(Info.Ctx)) {
Info.FFDiag(E, diag::note_constexpr_memcpy_nontrivial) << Move << T;
return false;
@@ -7320,6 +7404,8 @@ public:
// Visitor Methods
//===--------------------------------------------------------------------===//
+ bool VisitConstantExpr(const ConstantExpr *E);
+
bool VisitIntegerLiteral(const IntegerLiteral *E) {
return Success(E->getValue(), E);
}
@@ -7619,6 +7705,9 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -8023,7 +8112,7 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
// If there are any, but we can determine the pointed-to object anyway, then
// ignore the side-effects.
SpeculativeEvaluationRAII SpeculativeEval(Info);
- FoldOffsetRAII Fold(Info);
+ IgnoreSideEffectsRAII Fold(Info);
if (E->isGLValue()) {
// It's possible for us to be given GLValues if we're called via
@@ -8057,6 +8146,11 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
return true;
}
+bool IntExprEvaluator::VisitConstantExpr(const ConstantExpr *E) {
+ llvm::SaveAndRestore<bool> InConstantContext(Info.InConstantContext, true);
+ return ExprEvaluatorBaseTy::VisitConstantExpr(E);
+}
+
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (unsigned BuiltinOp = E->getBuiltinCallee())
return VisitBuiltinCallExpr(E, BuiltinOp);
@@ -8091,7 +8185,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case EvalInfo::EM_ConstantFold:
case EvalInfo::EM_EvaluateForOverflow:
case EvalInfo::EM_IgnoreSideEffects:
- case EvalInfo::EM_OffsetFold:
// Leave it to IR generation.
return Error(E);
case EvalInfo::EM_ConstantExpressionUnevaluated:
@@ -8103,6 +8196,12 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
llvm_unreachable("unexpected EvalMode");
}
+ case Builtin::BI__builtin_os_log_format_buffer_size: {
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(Info.Ctx, E, Layout);
+ return Success(Layout.size().getQuantity(), E);
+ }
+
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -8116,9 +8215,15 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BI__builtin_classify_type:
return Success((int)EvaluateBuiltinClassifyType(E, Info.getLangOpts()), E);
- // FIXME: BI__builtin_clrsb
- // FIXME: BI__builtin_clrsbl
- // FIXME: BI__builtin_clrsbll
+ case Builtin::BI__builtin_clrsb:
+ case Builtin::BI__builtin_clrsbl:
+ case Builtin::BI__builtin_clrsbll: {
+ APSInt Val;
+ if (!EvaluateInteger(E->getArg(0), Val, Info))
+ return false;
+
+ return Success(Val.getBitWidth() - Val.getMinSignedBits(), E);
+ }
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
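
__builtin_clrsb counts the redundant sign bits, which is the bit width minus the minimal signed representation width, exactly the formula used above. For a 32-bit int, and assuming a compiler that constant-folds the builtin (which this change enables):

    static_assert(__builtin_clrsb(0)  == 31, "0 needs 1 signed bit:  32 - 1");
    static_assert(__builtin_clrsb(1)  == 30, "1 needs 2 signed bits: 32 - 2");
    static_assert(__builtin_clrsb(-1) == 31, "-1 needs 1 signed bit: 32 - 1");
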
@@ -8133,8 +8238,20 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(Val.countLeadingZeros(), E);
}
- case Builtin::BI__builtin_constant_p:
- return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+ case Builtin::BI__builtin_constant_p: {
+ auto Arg = E->getArg(0);
+ if (EvaluateBuiltinConstantP(Info.Ctx, Arg))
+ return Success(true, E);
+ auto ArgTy = Arg->IgnoreImplicit()->getType();
+ if (!Info.InConstantContext && !Arg->HasSideEffects(Info.Ctx) &&
+ !ArgTy->isAggregateType() && !ArgTy->isPointerType()) {
+ // We can delay calculation of __builtin_constant_p until after
+ // inlining. Note: This diagnostic won't be shown to the user.
+ Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ return Success(false, E);
+ }
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
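
The effect of the new branch: in a context that requires a constant the builtin still folds to 0 or 1 immediately, but for an ordinary scalar argument with no side effects the answer is deferred so the optimizer can still prove constancy after inlining. Roughly, under the usual GCC-compatible semantics:

    int constantPFlags(int N) {
      // Folds to 1 right away: the operand is itself a constant expression.
      int A = __builtin_constant_p(21 * 2);

      // Not constant here, but left for IR generation; after inlining with a
      // constant argument the optimizer may still turn this into 1.
      int B = __builtin_constant_p(N);

      return N + A + B;
    }
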
@@ -8314,8 +8431,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
!EvaluatePointer(E->getArg(1), String2, Info))
return false;
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
-
uint64_t MaxLength = uint64_t(-1);
if (BuiltinOp != Builtin::BIstrcmp &&
BuiltinOp != Builtin::BIwcscmp &&
@@ -8326,6 +8441,88 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
MaxLength = N.getExtValue();
}
+
+ // Empty substrings compare equal by definition.
+ if (MaxLength == 0u)
+ return Success(0, E);
+
+ if (!String1.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ !String2.checkNullPointerForFoldAccess(Info, E, AK_Read) ||
+ String1.Designator.Invalid || String2.Designator.Invalid)
+ return false;
+
+ QualType CharTy1 = String1.Designator.getType(Info.Ctx);
+ QualType CharTy2 = String2.Designator.getType(Info.Ctx);
+
+ bool IsRawByte = BuiltinOp == Builtin::BImemcmp ||
+ BuiltinOp == Builtin::BI__builtin_memcmp;
+
+ assert(IsRawByte ||
+ (Info.Ctx.hasSameUnqualifiedType(
+ CharTy1, E->getArg(0)->getType()->getPointeeType()) &&
+ Info.Ctx.hasSameUnqualifiedType(CharTy1, CharTy2)));
+
+ const auto &ReadCurElems = [&](APValue &Char1, APValue &Char2) {
+ return handleLValueToRValueConversion(Info, E, CharTy1, String1, Char1) &&
+ handleLValueToRValueConversion(Info, E, CharTy2, String2, Char2) &&
+ Char1.isInt() && Char2.isInt();
+ };
+ const auto &AdvanceElems = [&] {
+ return HandleLValueArrayAdjustment(Info, E, String1, CharTy1, 1) &&
+ HandleLValueArrayAdjustment(Info, E, String2, CharTy2, 1);
+ };
+
+ if (IsRawByte) {
+ uint64_t BytesRemaining = MaxLength;
+ // Pointers to const void may point to objects of incomplete type.
+ if (CharTy1->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy1;
+ return false;
+ }
+ if (CharTy2->isIncompleteType()) {
+ Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy2;
+ return false;
+ }
+ uint64_t CharTy1Width{Info.Ctx.getTypeSize(CharTy1)};
+ CharUnits CharTy1Size = Info.Ctx.toCharUnitsFromBits(CharTy1Width);
+ // Give up on comparing between elements with disparate widths.
+ if (CharTy1Size != Info.Ctx.getTypeSizeInChars(CharTy2))
+ return false;
+ uint64_t BytesPerElement = CharTy1Size.getQuantity();
+ assert(BytesRemaining && "BytesRemaining should not be zero: the "
+ "following loop considers at least one element");
+ while (true) {
+ APValue Char1, Char2;
+ if (!ReadCurElems(Char1, Char2))
+ return false;
+ // We have compatible in-memory widths, but a possible type and
+ // (for `bool`) internal representation mismatch.
+ // Assuming two's complement representation, including 0 for `false` and
+ // 1 for `true`, we can check an appropriate number of elements for
+ // equality even if they are not byte-sized.
+ APSInt Char1InMem = Char1.getInt().extOrTrunc(CharTy1Width);
+ APSInt Char2InMem = Char2.getInt().extOrTrunc(CharTy1Width);
+ if (Char1InMem.ne(Char2InMem)) {
+ // If the elements are byte-sized, then we can produce a three-way
+ // comparison result in a straightforward manner.
+ if (BytesPerElement == 1u) {
+ // memcmp always compares unsigned chars.
+ return Success(Char1InMem.ult(Char2InMem) ? -1 : 1, E);
+ }
+ // The result is byte-order sensitive, and we have multibyte elements.
+ // FIXME: We can compare the remaining bytes in the correct order.
+ return false;
+ }
+ if (!AdvanceElems())
+ return false;
+ if (BytesRemaining <= BytesPerElement)
+ break;
+ BytesRemaining -= BytesPerElement;
+ }
+ // Enough elements are equal to account for the memcmp limit.
+ return Success(0, E);
+ }
+
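
The "memcmp always compares unsigned chars" note is what makes the three-way result well defined even for elements that are negative as plain char; this is a property of the standard library function rather than of this patch:

    #include <cassert>
    #include <cstring>

    void memcmpComparesUnsignedBytes() {
      const char A[] = {'\x01'};
      const char B[] = {'\xff'}; // -1 as signed char, 255 as the unsigned char memcmp uses
      assert(std::memcmp(A, B, 1) < 0); // 0x01 < 0xff when compared as unsigned bytes
    }
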
bool StopAtNull = (BuiltinOp != Builtin::BImemcmp &&
BuiltinOp != Builtin::BIwmemcmp &&
BuiltinOp != Builtin::BI__builtin_memcmp &&
@@ -8336,11 +8533,10 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
BuiltinOp == Builtin::BI__builtin_wcscmp ||
BuiltinOp == Builtin::BI__builtin_wcsncmp ||
BuiltinOp == Builtin::BI__builtin_wmemcmp;
+
for (; MaxLength; --MaxLength) {
APValue Char1, Char2;
- if (!handleLValueToRValueConversion(Info, E, CharTy, String1, Char1) ||
- !handleLValueToRValueConversion(Info, E, CharTy, String2, Char2) ||
- !Char1.isInt() || !Char2.isInt())
+ if (!ReadCurElems(Char1, Char2))
return false;
if (Char1.getInt() != Char2.getInt()) {
if (IsWide) // wmemcmp compares with wchar_t signedness.
@@ -8351,8 +8547,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (StopAtNull && !Char1.getInt())
return Success(0, E);
assert(!(StopAtNull && !Char2.getInt()));
- if (!HandleLValueArrayAdjustment(Info, E, String1, CharTy, 1) ||
- !HandleLValueArrayAdjustment(Info, E, String2, CharTy, 1))
+ if (!AdvanceElems())
return false;
}
// We hit the strncmp / memcmp limit.
@@ -9343,11 +9538,14 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
switch(E->getKind()) {
+ case UETT_PreferredAlignOf:
case UETT_AlignOf: {
if (E->isArgumentType())
- return Success(GetAlignOfType(Info, E->getArgumentType()), E);
+ return Success(GetAlignOfType(Info, E->getArgumentType(), E->getKind()),
+ E);
else
- return Success(GetAlignOfExpr(Info, E->getArgumentExpr()), E);
+ return Success(GetAlignOfExpr(Info, E->getArgumentExpr(), E->getKind()),
+ E);
}
case UETT_VecStep: {
@@ -9536,11 +9734,11 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -9575,6 +9773,14 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Success(IntResult, E);
}
+ case CK_FixedPointToBoolean: {
+ // Unsigned padding does not affect this.
+ APValue Val;
+ if (!Evaluate(Val, Info, SubExpr))
+ return false;
+ return Success(Val.getInt().getBoolValue(), E);
+ }
+
case CK_IntegralCast: {
if (!Visit(SubExpr))
return false;
@@ -9697,8 +9903,7 @@ bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
SmallString<64> S;
FixedPointValueToString(S, Value,
- Info.Ctx.getTypeInfo(E->getType()).Width,
- /*Radix=*/10);
+ Info.Ctx.getTypeInfo(E->getType()).Width);
Info.CCEDiag(E, diag::note_constexpr_overflow) << S << E->getType();
if (Info.noteUndefinedBehavior()) return false;
}
@@ -10071,11 +10276,12 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -10236,7 +10442,7 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
case BO_Mul:
if (Result.isComplexFloat()) {
// This is an implementation of complex multiplication according to the
- // constraints laid out in C11 Annex G. The implemention uses the
+ // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) * (c + id)
ComplexValue LHS = Result;
@@ -10317,7 +10523,7 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
case BO_Div:
if (Result.isComplexFloat()) {
// This is an implementation of complex division according to the
- // constraints laid out in C11 Annex G. The implemention uses the
+ // constraints laid out in C11 Annex G. The implementation uses the
// following naming scheme:
// (a + ib) / (c + id)
ComplexValue LHS = Result;
@@ -10693,19 +10899,46 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
return false;
}
+static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
+ Expr::SideEffectsKind SEK) {
+ return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
+ (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
+}
+
+static bool EvaluateAsRValue(const Expr *E, Expr::EvalResult &Result,
+ const ASTContext &Ctx, EvalInfo &Info) {
+ bool IsConst;
+ if (FastEvaluateAsRValue(E, Result, Ctx, IsConst))
+ return IsConst;
+
+ return EvaluateAsRValue(Info, E, Result.Val);
+}
+
+static bool EvaluateAsInt(const Expr *E, Expr::EvalResult &ExprResult,
+ const ASTContext &Ctx,
+ Expr::SideEffectsKind AllowSideEffects,
+ EvalInfo &Info) {
+ if (!E->getType()->isIntegralOrEnumerationType())
+ return false;
+
+ if (!::EvaluateAsRValue(E, ExprResult, Ctx, Info) ||
+ !ExprResult.Val.isInt() ||
+ hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
+ return false;
+
+ return true;
+}
/// EvaluateAsRValue - Return true if this is a constant which we can fold using
/// any crazy technique (that has nothing to do with language standards) that
/// we want to. If this function returns true, it returns the folded constant
/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
/// will be applied to the result.
-bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
- bool IsConst;
- if (FastEvaluateAsRValue(this, Result, Ctx, IsConst))
- return IsConst;
-
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
+ bool InConstantContext) const {
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
- return ::EvaluateAsRValue(Info, this, Result.Val);
+ Info.InConstantContext = InConstantContext;
+ return ::EvaluateAsRValue(this, Result, Ctx, Info);
}
bool Expr::EvaluateAsBooleanCondition(bool &Result,
@@ -10715,24 +10948,10 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result,
HandleConversionToBool(Scratch.Val, Result);
}
-static bool hasUnacceptableSideEffect(Expr::EvalStatus &Result,
- Expr::SideEffectsKind SEK) {
- return (SEK < Expr::SE_AllowSideEffects && Result.HasSideEffects) ||
- (SEK < Expr::SE_AllowUndefinedBehavior && Result.HasUndefinedBehavior);
-}
-
-bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects) const {
- if (!getType()->isIntegralOrEnumerationType())
- return false;
-
- EvalResult ExprResult;
- if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
- hasUnacceptableSideEffect(ExprResult, AllowSideEffects))
- return false;
-
- Result = ExprResult.Val.getInt();
- return true;
+ EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
+ return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info);
}
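
Callers migrate from passing an APSInt to passing an Expr::EvalResult and reading the integer out of it, as the updated alloc_size helper earlier in this patch already does. The caller-side pattern, sketched for some Expr *E and ASTContext Ctx in scope:

    // Old: llvm::APSInt Value; E->EvaluateAsInt(Value, Ctx, Expr::SE_AllowSideEffects);
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, Ctx, Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      // ... use Value ...
    }
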
bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
@@ -10790,6 +11009,7 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
? EvalInfo::EM_ConstantExpression
: EvalInfo::EM_ConstantFold);
InitInfo.setEvaluatingDecl(VD, Value);
+ InitInfo.InConstantContext = true;
LValue LVal;
LVal.set(VD);
@@ -10819,28 +11039,46 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
/// constant folded, but discard the result.
bool Expr::isEvaluatable(const ASTContext &Ctx, SideEffectsKind SEK) const {
EvalResult Result;
- return EvaluateAsRValue(Result, Ctx) &&
+ return EvaluateAsRValue(Result, Ctx, /* in constant context */ true) &&
!hasUnacceptableSideEffect(Result, SEK);
}
APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
- EvalResult EvalResult;
- EvalResult.Diag = Diag;
- bool Result = EvaluateAsRValue(EvalResult, Ctx);
+ EvalResult EVResult;
+ EVResult.Diag = Diag;
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = true;
+
+ bool Result = ::EvaluateAsRValue(this, EVResult, Ctx, Info);
(void)Result;
assert(Result && "Could not evaluate expression");
- assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+ assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
- return EvalResult.Val.getInt();
+ return EVResult.Val.getInt();
+}
+
+APSInt Expr::EvaluateKnownConstIntCheckOverflow(
+ const ASTContext &Ctx, SmallVectorImpl<PartialDiagnosticAt> *Diag) const {
+ EvalResult EVResult;
+ EVResult.Diag = Diag;
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_EvaluateForOverflow);
+ Info.InConstantContext = true;
+
+ bool Result = ::EvaluateAsRValue(Info, this, EVResult.Val);
+ (void)Result;
+ assert(Result && "Could not evaluate expression");
+ assert(EVResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EVResult.Val.getInt();
}
void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
bool IsConst;
- EvalResult EvalResult;
- if (!FastEvaluateAsRValue(this, EvalResult, Ctx, IsConst)) {
- EvalInfo Info(Ctx, EvalResult, EvalInfo::EM_EvaluateForOverflow);
- (void)::EvaluateAsRValue(Info, this, EvalResult.Val);
+ EvalResult EVResult;
+ if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) {
+ EvalInfo Info(Ctx, EVResult, EvalInfo::EM_EvaluateForOverflow);
+ (void)::EvaluateAsRValue(Info, this, EVResult.Val);
}
}
@@ -10893,9 +11131,13 @@ static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
Expr::EvalResult EVResult;
- if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
+
+ Info.InConstantContext = true;
+ if (!::EvaluateAsRValue(E, EVResult, Ctx, Info) || EVResult.HasSideEffects ||
!EVResult.Val.isInt())
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
return NoDiag();
}
@@ -10903,7 +11145,7 @@ static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
assert(!E->isValueDependent() && "Should not see value dependent exprs!");
if (!E->getType()->isIntegralOrEnumerationType())
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
switch (E->getStmtClass()) {
#define ABSTRACT_STMT(Node)
@@ -10987,7 +11229,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CoawaitExprClass:
case Expr::DependentCoawaitExprClass:
case Expr::CoyieldExprClass:
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
case Expr::InitListExprClass: {
// C++03 [dcl.init]p13: If T is a scalar type, then a declaration of the
@@ -10997,7 +11239,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
if (E->isRValue())
if (cast<InitListExpr>(E)->getNumInits() == 1)
return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
}
case Expr::SizeOfPackExprClass:
@@ -11009,6 +11251,9 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return
CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+ case Expr::ConstantExprClass:
+ return CheckICE(cast<ConstantExpr>(E)->getSubExpr(), Ctx);
+
case Expr::ParenExprClass:
return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
case Expr::GenericSelectionExprClass:
@@ -11032,7 +11277,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
const CallExpr *CE = cast<CallExpr>(E);
if (CE->getBuiltinCallee())
return CheckEvalInICE(E, Ctx);
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
}
case Expr::DeclRefExprClass: {
if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
@@ -11062,7 +11307,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return ICEDiag(IK_NotICE, cast<DeclRefExpr>(E)->getLocation());
}
}
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
}
case Expr::UnaryOperatorClass: {
const UnaryOperator *Exp = cast<UnaryOperator>(E);
@@ -11077,7 +11322,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// C99 6.6/3 allows increment and decrement within unevaluated
// subexpressions of constant expressions, but they can never be ICEs
// because an ICE cannot contain an lvalue operand.
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
case UO_Extension:
case UO_LNot:
case UO_Plus:
@@ -11087,9 +11332,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case UO_Imag:
return CheckICE(Exp->getSubExpr(), Ctx);
}
-
- // OffsetOf falls through here.
- LLVM_FALLTHROUGH;
+ llvm_unreachable("invalid unary operator class");
}
case Expr::OffsetOfExprClass: {
// Note that per C99, offsetof must be an ICE. And AFAIK, using
@@ -11104,7 +11347,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
if ((Exp->getKind() == UETT_SizeOf) &&
Exp->getTypeOfArgument()->isVariableArrayType())
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
return NoDiag();
}
case Expr::BinaryOperatorClass: {
@@ -11126,7 +11369,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// C99 6.6/3 allows assignments within unevaluated subexpressions of
// constant expressions, but they can never be ICEs because an ICE cannot
// contain an lvalue operand.
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
case BO_Mul:
case BO_Div:
@@ -11155,11 +11398,11 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
if (REval == 0)
- return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
if (REval.isSigned() && REval.isAllOnesValue()) {
llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
if (LEval.isMinSignedValue())
- return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
}
}
}
@@ -11168,10 +11411,10 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// C99 6.6p3 introduces a strange edge case: comma can be in an ICE
// if it isn't evaluated.
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE)
- return ICEDiag(IK_ICEIfUnevaluated, E->getLocStart());
+ return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
} else {
// In both C89 and C++, commas in ICEs are illegal.
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
}
}
return Worst(LHSResult, RHSResult);
@@ -11193,7 +11436,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return Worst(LHSResult, RHSResult);
}
}
- LLVM_FALLTHROUGH;
+ llvm_unreachable("invalid binary operator kind");
}
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
@@ -11216,7 +11459,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
if (FL->getValue().convertToInteger(IgnoredVal,
llvm::APFloat::rmTowardZero,
&Ignored) & APFloat::opInvalidOp)
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
return NoDiag();
}
}
@@ -11229,7 +11472,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case CK_IntegralCast:
return CheckICE(SubExpr, Ctx);
default:
- return ICEDiag(IK_NotICE, E->getLocStart());
+ return ICEDiag(IK_NotICE, E->getBeginLoc());
}
}
case Expr::BinaryConditionalOperatorClass: {
@@ -11330,12 +11573,20 @@ bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, const ASTContext &Ctx,
if (!isIntegerConstantExpr(Ctx, Loc))
return false;
+
// The only possible side-effects here are due to UB discovered in the
// evaluation (for instance, INT_MAX + 1). In such a case, we are still
// required to treat the expression as an ICE, so we produce the folded
// value.
- if (!EvaluateAsInt(Value, Ctx, SE_AllowSideEffects))
+ EvalResult ExprResult;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_IgnoreSideEffects);
+ Info.InConstantContext = true;
+
+ if (!::EvaluateAsInt(this, ExprResult, Ctx, SE_AllowSideEffects, Info))
llvm_unreachable("ICE cannot be evaluated!");
+
+ Value = ExprResult.Val.getInt();
return true;
}
@@ -11421,6 +11672,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
EvalInfo Info(FD->getASTContext(), Status,
EvalInfo::EM_PotentialConstantExpression);
+ Info.InConstantContext = true;
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : nullptr;
diff --git a/lib/AST/ExternalASTMerger.cpp b/lib/AST/ExternalASTMerger.cpp
index ae28c588ca31..12e6bfc041a4 100644
--- a/lib/AST/ExternalASTMerger.cpp
+++ b/lib/AST/ExternalASTMerger.cpp
@@ -144,14 +144,14 @@ public:
}
if (auto *ToTag = dyn_cast<TagDecl>(To)) {
ToTag->setHasExternalLexicalStorage();
- ToTag->setMustBuildLookupTable();
+ ToTag->getPrimaryContext()->setMustBuildLookupTable();
assert(Parent.CanComplete(ToTag));
} else if (auto *ToNamespace = dyn_cast<NamespaceDecl>(To)) {
ToNamespace->setHasExternalVisibleStorage();
assert(Parent.CanComplete(ToNamespace));
} else if (auto *ToContainer = dyn_cast<ObjCContainerDecl>(To)) {
ToContainer->setHasExternalLexicalStorage();
- ToContainer->setMustBuildLookupTable();
+ ToContainer->getPrimaryContext()->setMustBuildLookupTable();
assert(Parent.CanComplete(ToContainer));
}
return To;
@@ -230,7 +230,8 @@ void ExternalASTMerger::CompleteType(TagDecl *Tag) {
if (!SourceTag->getDefinition())
return false;
Forward.MapImported(SourceTag, Tag);
- Forward.ImportDefinition(SourceTag);
+ if (llvm::Error Err = Forward.ImportDefinition_New(SourceTag))
+ llvm::consumeError(std::move(Err));
Tag->setCompleteDefinition(SourceTag->isCompleteDefinition());
return true;
});
@@ -249,7 +250,8 @@ void ExternalASTMerger::CompleteType(ObjCInterfaceDecl *Interface) {
if (!SourceInterface->getDefinition())
return false;
Forward.MapImported(SourceInterface, Interface);
- Forward.ImportDefinition(SourceInterface);
+ if (llvm::Error Err = Forward.ImportDefinition_New(SourceInterface))
+ llvm::consumeError(std::move(Err));
return true;
});
}
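
ImportDefinition_New returns an llvm::Error that must be consumed or handled; the merger deliberately drops failures. A minimal sketch of that idiom, with a made-up mightFail helper standing in for the importer call:

    #include "llvm/Support/Error.h"

    static llvm::Error mightFail(bool Fail) {
      if (Fail)
        return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                       "import failed");
      return llvm::Error::success();
    }

    void caller() {
      // Checked and dropped, as the merger does:
      if (llvm::Error Err = mightFail(true))
        llvm::consumeError(std::move(Err));

      // Or actually inspected:
      if (llvm::Error Err = mightFail(true))
        llvm::handleAllErrors(std::move(Err), [](llvm::StringError &SE) {
          // e.g. log SE.getMessage()
        });
    }
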
diff --git a/lib/Analysis/FormatString.cpp b/lib/AST/FormatString.cpp
index f37e4affae3f..04bd48f14a2a 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/AST/FormatString.cpp
@@ -179,6 +179,36 @@ clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
}
bool
+clang::analyze_format_string::ParseVectorModifier(FormatStringHandler &H,
+ FormatSpecifier &FS,
+ const char *&I,
+ const char *E,
+ const LangOptions &LO) {
+ if (!LO.OpenCL)
+ return false;
+
+ const char *Start = I;
+ if (*I == 'v') {
+ ++I;
+
+ if (I == E) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ OptionalAmount NumElts = ParseAmount(I, E);
+ if (NumElts.getHowSpecified() != OptionalAmount::Constant) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ FS.setVectorNumElts(NumElts);
+ }
+
+ return false;
+}
+
+bool
clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
const char *&I,
const char *E,
@@ -406,12 +436,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
}
case WIntTy: {
+ QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
- QualType PromoArg =
- argTy->isPromotableIntegerType()
- ? C.getPromotedIntegerType(argTy) : argTy;
+ if (C.getCanonicalType(argTy).getUnqualifiedType() == WInt)
+ return Match;
- QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
+ QualType PromoArg = argTy->isPromotableIntegerType()
+ ? C.getPromotedIntegerType(argTy)
+ : argTy;
PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
// If the promoted argument is the corresponding signed type of the
@@ -455,6 +487,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
llvm_unreachable("Invalid ArgType Kind!");
}
+ArgType ArgType::makeVectorType(ASTContext &C, unsigned NumElts) const {
+ if (K != SpecificTy) // Won't be a valid vector element type.
+ return ArgType::Invalid();
+
+ QualType Vec = C.getExtVectorType(T, NumElts);
+ return ArgType(Vec, Name);
+}
+
QualType ArgType::getRepresentativeType(ASTContext &C) const {
QualType Res;
switch (K) {
@@ -687,7 +727,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
break;
}
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case LengthModifier::AsChar:
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
@@ -872,7 +912,7 @@ bool FormatSpecifier::hasStandardConversionSpecifier(
return true;
case ConversionSpecifier::CArg:
case ConversionSpecifier::SArg:
- return LangOpt.ObjC1 || LangOpt.ObjC2;
+ return LangOpt.ObjC;
case ConversionSpecifier::InvalidSpecifier:
case ConversionSpecifier::FreeBSDbArg:
case ConversionSpecifier::FreeBSDDArg:
diff --git a/lib/Analysis/FormatStringParsing.h b/lib/AST/FormatStringParsing.h
index a63140b366cd..9da829adcb49 100644
--- a/lib/Analysis/FormatStringParsing.h
+++ b/lib/AST/FormatStringParsing.h
@@ -3,7 +3,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
namespace clang {
@@ -41,6 +41,10 @@ bool ParseArgPosition(FormatStringHandler &H,
FormatSpecifier &CS, const char *Start,
const char *&Beg, const char *E);
+bool ParseVectorModifier(FormatStringHandler &H,
+ FormatSpecifier &FS, const char *&Beg, const char *E,
+ const LangOptions &LO);
+
/// Returns true if a LengthModifier was parsed and installed in the
/// FormatSpecifier& argument, and false otherwise.
bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 2dc04f2f3d86..98c843db31d6 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -33,12 +33,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#define MANGLE_CHECKER 0
-
-#if MANGLE_CHECKER
-#include <cxxabi.h>
-#endif
-
using namespace clang;
namespace {
@@ -323,7 +317,7 @@ class CXXNameMangler {
AdditionalAbiTags->end());
}
- llvm::sort(TagList.begin(), TagList.end());
+ llvm::sort(TagList);
TagList.erase(std::unique(TagList.begin(), TagList.end()), TagList.end());
writeSortedUniqueAbiTags(Out, TagList);
@@ -339,7 +333,7 @@ class CXXNameMangler {
}
const AbiTagList &getSortedUniqueUsedAbiTags() {
- llvm::sort(UsedAbiTags.begin(), UsedAbiTags.end());
+ llvm::sort(UsedAbiTags);
UsedAbiTags.erase(std::unique(UsedAbiTags.begin(), UsedAbiTags.end()),
UsedAbiTags.end());
return UsedAbiTags;
@@ -415,17 +409,6 @@ public:
SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
-#if MANGLE_CHECKER
- ~CXXNameMangler() {
- if (Out.str()[0] == '\01')
- return;
-
- int status = 0;
- char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
- assert(status == 0 && "Could not demangle mangled name!");
- free(result);
- }
-#endif
raw_ostream &getStream() { return Out; }
void disableDerivedAbiTags() { DisableDerivedAbiTags = true; }
@@ -721,10 +704,8 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) {
if (FD->hasAttr<EnableIfAttr>()) {
FunctionTypeDepthState Saved = FunctionTypeDepth.push();
Out << "Ua9enable_ifI";
- // FIXME: specific_attr_iterator iterates in reverse order. Fix that and use
- // it here.
- for (AttrVec::const_reverse_iterator I = FD->getAttrs().rbegin(),
- E = FD->getAttrs().rend();
+ for (AttrVec::const_iterator I = FD->getAttrs().begin(),
+ E = FD->getAttrs().end();
I != E; ++I) {
EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I);
if (!EIA)
@@ -1522,8 +1503,7 @@ void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
Out << 'N';
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
- Qualifiers MethodQuals =
- Qualifiers::fromCVRUMask(Method->getTypeQualifiers());
+ Qualifiers MethodQuals = Method->getTypeQualifiers();
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
MethodQuals.removeRestrict();
@@ -2654,6 +2634,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::OCLReserveID:
Out << "13ocl_reserveid";
break;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ type_name = "ocl_" #ExtType; \
+ Out << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
}
@@ -2662,16 +2648,12 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_C:
return "";
- case CC_X86StdCall:
- case CC_X86FastCall:
- case CC_X86ThisCall:
case CC_X86VectorCall:
case CC_X86Pascal:
- case CC_Win64:
- case CC_X86_64SysV:
case CC_X86RegCall:
case CC_AAPCS:
case CC_AAPCS_VFP:
+ case CC_AArch64VectorCall:
case CC_IntelOclBicc:
case CC_SpirFunction:
case CC_OpenCLKernel:
@@ -2680,6 +2662,22 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
// FIXME: we should be mangling all of the above.
return "";
+ case CC_X86ThisCall:
+  // FIXME: To match mingw GCC, thiscall should only be mangled when it is
+  // used explicitly. At this point, we don't have that much information in
+  // the AST, since clang tends to bake the convention into the canonical
+  // function type. thiscall is only rarely used explicitly, so don't mangle
+  // it for now.
+ return "";
+
+ case CC_X86StdCall:
+ return "stdcall";
+ case CC_X86FastCall:
+ return "fastcall";
+ case CC_X86_64SysV:
+ return "sysv_abi";
+ case CC_Win64:
+ return "ms_abi";
case CC_Swift:
return "swiftcall";
}
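Aside (not part of the patch): the strings returned here feed the Itanium vendor-extended-qualifier mangling, which is assumed below to take the usual U<length><name> shape used elsewhere in this mangler; the exact emission point is outside this hunk. A rough sketch of what the new names would look like in that form:

    #include <iostream>
    #include <string>

    // Hypothetical helper: renders a calling-convention name as an Itanium
    // vendor-extended qualifier, i.e. 'U' followed by <length><name>.
    static std::string vendorQualifier(const std::string &Name) {
      return Name.empty() ? std::string()
                          : "U" + std::to_string(Name.size()) + Name;
    }

    int main() {
      // With the table above, stdcall/fastcall/sysv_abi/ms_abi now get names,
      // while thiscall still maps to "" and is therefore not mangled.
      std::cout << vendorQualifier("stdcall") << "\n";  // U7stdcall
      std::cout << vendorQualifier("ms_abi") << "\n";   // U6ms_abi
      std::cout << vendorQualifier("") << "\n";         // (empty: not mangled)
    }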
@@ -2737,7 +2735,7 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
// Mangle CV-qualifiers, if present. These are 'this' qualifiers,
// e.g. "const" in "int (A::*)() const".
- mangleQualifiers(Qualifiers::fromCVRUMask(T->getTypeQuals()));
+ mangleQualifiers(T->getTypeQuals());
// Mangle instantiation-dependent exception-specification, if present,
// per cxx-abi-dev proposal on 2016-10-11.
@@ -3526,6 +3524,10 @@ recurse:
case Expr::CXXInheritedCtorInitExprClass:
llvm_unreachable("unexpected statement kind");
+ case Expr::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ goto recurse;
+
// FIXME: invent manglings for all these.
case Expr::BlockExprClass:
case Expr::ChooseExprClass:
@@ -3883,6 +3885,7 @@ recurse:
case UETT_SizeOf:
Out << 's';
break;
+ case UETT_PreferredAlignOf:
case UETT_AlignOf:
Out << 'a';
break;
diff --git a/lib/AST/Linkage.h b/lib/AST/Linkage.h
index e6489c7ef2b3..8ad748bcc4a2 100644
--- a/lib/AST/Linkage.h
+++ b/lib/AST/Linkage.h
@@ -20,6 +20,7 @@
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
namespace clang {
/// Kinds of LV computation. The linkage side of the computation is
@@ -36,6 +37,8 @@ struct LVComputationKind {
/// in computing linkage.
unsigned IgnoreAllVisibility : 1;
+ enum { NumLVComputationKindBits = 3 };
+
explicit LVComputationKind(NamedDecl::ExplicitVisibilityKind EK)
: ExplicitKind(EK), IgnoreExplicitVisibility(false),
IgnoreAllVisibility(false) {}
@@ -78,12 +81,14 @@ class LinkageComputer {
// using C = Foo<B, B>;
// using D = Foo<C, C>;
//
- // The unsigned represents an LVComputationKind.
- using QueryType = std::pair<const NamedDecl *, unsigned>;
+ // The integer represents an LVComputationKind.
+ using QueryType =
+ llvm::PointerIntPair<const NamedDecl *,
+ LVComputationKind::NumLVComputationKindBits>;
llvm::SmallDenseMap<QueryType, LinkageInfo, 8> CachedLinkageInfo;
static QueryType makeCacheKey(const NamedDecl *ND, LVComputationKind Kind) {
- return std::make_pair(ND, Kind.toBits());
+ return QueryType(ND, Kind.toBits());
}
llvm::Optional<LinkageInfo> lookup(const NamedDecl *ND,
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index b0e5146e8194..bb29bffc1b8f 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -25,12 +25,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#define MANGLE_CHECKER 0
-
-#if MANGLE_CHECKER
-#include <cxxabi.h>
-#endif
-
using namespace clang;
// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 5db51b5cb384..92e9679e49aa 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -29,6 +29,7 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/JamCRC.h"
+#include "llvm/Support/xxhash.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
@@ -127,10 +128,10 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds;
llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds;
+ SmallString<16> AnonymousNamespaceHash;
public:
- MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags)
- : MicrosoftMangleContext(Context, Diags) {}
+ MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags);
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
void mangleCXXName(const NamedDecl *D, raw_ostream &Out) override;
@@ -238,6 +239,12 @@ public:
return Result.first->second;
}
+  /// Return a character sequence that is (somewhat) unique to the TU,
+  /// suitable for mangling anonymous namespaces.
+ StringRef getAnonymousNamespaceHash() const {
+ return AnonymousNamespaceHash;
+ }
+
private:
void mangleInitFiniStub(const VarDecl *D, char CharCode, raw_ostream &Out);
};
@@ -302,13 +309,15 @@ public:
const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
void mangleTagTypeKind(TagTypeKind TK);
- void mangleArtificalTagType(TagTypeKind TK, StringRef UnqualifiedName,
+ void mangleArtificialTagType(TagTypeKind TK, StringRef UnqualifiedName,
ArrayRef<StringRef> NestedNames = None);
+ void mangleAddressSpaceType(QualType T, Qualifiers Quals, SourceRange Range);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
void mangleFunctionType(const FunctionType *T,
const FunctionDecl *D = nullptr,
- bool ForceThisQuals = false);
+ bool ForceThisQuals = false,
+ bool MangleExceptionSpec = true);
void mangleNestedName(const NamedDecl *ND);
private:
@@ -368,9 +377,39 @@ private:
void mangleObjCProtocol(const ObjCProtocolDecl *PD);
void mangleObjCLifetime(const QualType T, Qualifiers Quals,
SourceRange Range);
+ void mangleObjCKindOfType(const ObjCObjectType *T, Qualifiers Quals,
+ SourceRange Range);
};
}
+MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context,
+ DiagnosticsEngine &Diags)
+ : MicrosoftMangleContext(Context, Diags) {
+ // To mangle anonymous namespaces, hash the path to the main source file. The
+ // path should be whatever (probably relative) path was passed on the command
+ // line. The goal is for the compiler to produce the same output regardless of
+ // working directory, so use the uncanonicalized relative path.
+ //
+ // It's important to make the mangled names unique because, when CodeView
+ // debug info is in use, the debugger uses mangled type names to distinguish
+ // between otherwise identically named types in anonymous namespaces.
+ //
+ // These symbols are always internal, so there is no need for the hash to
+ // match what MSVC produces. For the same reason, clang is free to change the
+ // hash at any time without breaking compatibility with old versions of clang.
+ // The generated names are intended to look similar to what MSVC generates,
+ // which are something like "?A0x01234567@".
+ SourceManager &SM = Context.getSourceManager();
+ if (const FileEntry *FE = SM.getFileEntryForID(SM.getMainFileID())) {
+ // Truncate the hash so we get 8 characters of hexadecimal.
+ uint32_t TruncatedHash = uint32_t(xxHash64(FE->getName()));
+ AnonymousNamespaceHash = llvm::utohexstr(TruncatedHash);
+ } else {
+ // If we don't have a path to the main file, we'll just use 0.
+ AnonymousNamespaceHash = "0";
+ }
+}
+
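Aside (not part of the patch): the constructor above truncates a 64-bit xxHash of the main-file path to 32 bits so the anonymous-namespace component carries eight hex characters, giving names of the form "?A0x01234567@". A minimal sketch of that shape, with std::hash standing in for llvm::xxHash64 and llvm::utohexstr (the real code uses those, and the path below is hypothetical):

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <string>

    int main() {
      // Hash the (possibly relative) main-file path exactly as it was passed
      // on the command line; std::hash stands in for xxHash64 here.
      std::string MainFilePath = "src/foo.cpp";  // hypothetical input
      uint64_t H64 = std::hash<std::string>{}(MainFilePath);

      // Truncate to 32 bits so the mangling carries 8 hex characters.
      uint32_t Truncated = static_cast<uint32_t>(H64);

      char Buf[32];
      std::snprintf(Buf, sizeof(Buf), "?A0x%08X@",
                    static_cast<unsigned>(Truncated));
      std::printf("anonymous-namespace component: %s\n", Buf);
    }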
bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
LanguageLinkage L = FD->getLanguageLinkage();
@@ -475,7 +514,7 @@ void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
mangleFunctionClass(FD);
- mangleFunctionType(FT, FD);
+ mangleFunctionType(FT, FD, false, false);
} else {
Out << '9';
}
@@ -785,7 +824,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
if (NS->isAnonymousNamespace()) {
- Out << "?A@";
+ Out << "?A0x" << Context.getAnonymousNamespaceHash() << '@';
break;
}
}
@@ -905,8 +944,14 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
- case DeclarationName::ObjCMultiArgSelector:
- llvm_unreachable("Can't mangle Objective-C selector names here!");
+ case DeclarationName::ObjCMultiArgSelector: {
+    // This is reachable only when constructing an outlined SEH finally
+    // block. Nothing depends on this mangling and it's used only with
+    // functions with internal linkage.
+ llvm::SmallString<64> Name;
+ mangleSourceName(Name.str());
+ break;
+ }
case DeclarationName::CXXConstructorName:
if (isStructorDecl(ND)) {
@@ -1027,7 +1072,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
if (PointersAre64Bit)
Out << 'E';
Out << 'A';
- mangleArtificalTagType(TTK_Struct,
+ mangleArtificialTagType(TTK_Struct,
Discriminate("__block_literal", Discriminator,
ParameterDiscriminator));
Out << "@Z";
@@ -1322,7 +1367,7 @@ void MicrosoftCXXNameMangler::mangleExpression(const Expr *E) {
// It's a global variable.
Out << '3';
// It's a struct called __s_GUID.
- mangleArtificalTagType(TTK_Struct, "__s_GUID");
+ mangleArtificialTagType(TTK_Struct, "__s_GUID");
// It's const.
Out << 'B';
return;
@@ -1343,9 +1388,16 @@ void MicrosoftCXXNameMangler::mangleTemplateArgs(
assert(TPL->size() == TemplateArgs.size() &&
"size mismatch between args and parms!");
- unsigned Idx = 0;
- for (const TemplateArgument &TA : TemplateArgs.asArray())
- mangleTemplateArg(TD, TA, TPL->getParam(Idx++));
+ for (size_t i = 0; i < TemplateArgs.size(); ++i) {
+ const TemplateArgument &TA = TemplateArgs[i];
+
+ // Separate consecutive packs by $$Z.
+ if (i > 0 && TA.getKind() == TemplateArgument::Pack &&
+ TemplateArgs[i - 1].getKind() == TemplateArgument::Pack)
+ Out << "$$Z";
+
+ mangleTemplateArg(TD, TA, TPL->getParam(i));
+ }
}
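Aside (not part of the patch): the only behavioral change in this hunk is the "$$Z" separator emitted between two adjacent pack arguments. A tiny sketch of that rule over a list of argument kinds, with Pack standing in for TemplateArgument::Pack and placeholder strings in place of the real per-argument mangling:

    #include <iostream>
    #include <vector>

    enum class Kind { Type, Pack };

    int main() {
      // e.g. instantiating with <Ts..., Us...>: two adjacent packs need "$$Z"
      // between them so the boundary between the packs is recoverable.
      std::vector<Kind> Args = {Kind::Pack, Kind::Pack, Kind::Type};
      for (size_t I = 0; I < Args.size(); ++I) {
        if (I > 0 && Args[I] == Kind::Pack && Args[I - 1] == Kind::Pack)
          std::cout << "$$Z";
        std::cout << "<arg" << I << ">";  // placeholder for the real mangling
      }
      std::cout << "\n";  // prints: <arg0>$$Z<arg1><arg2>
    }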
void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
@@ -1471,9 +1523,9 @@ void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) {
Stream << "?$";
Extra.mangleSourceName("Protocol");
- Extra.mangleArtificalTagType(TTK_Struct, PD->getName());
+ Extra.mangleArtificialTagType(TTK_Struct, PD->getName());
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
@@ -1502,7 +1554,24 @@ void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
Extra.manglePointerExtQualifiers(Quals, Type);
Extra.mangleType(Type, Range);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+}
+
+void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
+ Qualifiers Quals,
+ SourceRange Range) {
+ llvm::SmallString<64> TemplateMangling;
+ llvm::raw_svector_ostream Stream(TemplateMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+
+ Stream << "?$";
+ Extra.mangleSourceName("KindOf");
+ Extra.mangleType(QualType(T, 0)
+ .stripObjCKindOfType(getASTContext())
+ ->getAs<ObjCObjectType>(),
+ Quals, Range);
+
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
@@ -1698,7 +1767,7 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
if (Found == TypeBackReferences.end()) {
- mangleArtificalTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
+ mangleArtificialTagType(TTK_Enum, "__pass_object_size" + llvm::utostr(Type),
{"__clang"});
if (TypeBackReferences.size() < 10) {
@@ -1710,12 +1779,77 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
}
}
+void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T,
+ Qualifiers Quals,
+ SourceRange Range) {
+ // Address space is mangled as an unqualified templated type in the __clang
+ // namespace. The demangled version of this is:
+ // In the case of a language specific address space:
+ // __clang::struct _AS[language_addr_space]<Type>
+ // where:
+ // <language_addr_space> ::= <OpenCL-addrspace> | <CUDA-addrspace>
+ // <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" |
+  //                                "private" | "generic" ]
+ // <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
+ // Note that the above were chosen to match the Itanium mangling for this.
+ //
+ // In the case of a non-language specific address space:
+ // __clang::struct _AS<TargetAS, Type>
+ assert(Quals.hasAddressSpace() && "Not valid without address space");
+ llvm::SmallString<32> ASMangling;
+ llvm::raw_svector_ostream Stream(ASMangling);
+ MicrosoftCXXNameMangler Extra(Context, Stream);
+ Stream << "?$";
+
+ LangAS AS = Quals.getAddressSpace();
+ if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
+ unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
+ Extra.mangleSourceName("_AS");
+ Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(TargetAS),
+ /*IsBoolean*/ false);
+ } else {
+ switch (AS) {
+ default:
+ llvm_unreachable("Not a language specific address space");
+ case LangAS::opencl_global:
+ Extra.mangleSourceName("_ASCLglobal");
+ break;
+ case LangAS::opencl_local:
+ Extra.mangleSourceName("_ASCLlocal");
+ break;
+ case LangAS::opencl_constant:
+ Extra.mangleSourceName("_ASCLconstant");
+ break;
+ case LangAS::opencl_private:
+ Extra.mangleSourceName("_ASCLprivate");
+ break;
+ case LangAS::opencl_generic:
+ Extra.mangleSourceName("_ASCLgeneric");
+ break;
+ case LangAS::cuda_device:
+ Extra.mangleSourceName("_ASCUdevice");
+ break;
+ case LangAS::cuda_constant:
+ Extra.mangleSourceName("_ASCUconstant");
+ break;
+ case LangAS::cuda_shared:
+ Extra.mangleSourceName("_ASCUshared");
+ break;
+ }
+ }
+
+ Extra.mangleType(T, Range, QMM_Escape);
+ mangleQualifiers(Qualifiers(), false);
+ mangleArtificialTagType(TTK_Struct, ASMangling, {"__clang"});
+}
+
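Aside (not part of the patch): a small sketch of the spelling rules from the comment above, where target (numbered) address spaces become an _AS template argument and language address spaces get the CL/CU-prefixed names. The enum and mapping below are stand-ins for the real LangAS handling; only the source-name strings are meant to match:

    #include <iostream>
    #include <string>

    enum class AS { OpenCLGlobal, OpenCLLocal, CUDAShared, Target };

    // Produce the struct name used inside the __clang namespace, per the
    // grammar above: _AS for target address spaces (the number follows as a
    // template argument), otherwise _ASCL* / _ASCU* for OpenCL / CUDA.
    static std::string addressSpaceName(AS Kind) {
      switch (Kind) {
      case AS::OpenCLGlobal: return "_ASCLglobal";
      case AS::OpenCLLocal:  return "_ASCLlocal";
      case AS::CUDAShared:   return "_ASCUshared";
      case AS::Target:       return "_AS";
      }
      return {};
    }

    int main() {
      // A demangled form would read roughly __clang::_ASCLglobal<int>, or
      // __clang::_AS<3, int> for a non-language (target) address space.
      std::cout << addressSpaceName(AS::OpenCLGlobal) << "\n";
      std::cout << addressSpaceName(AS::Target) << "\n";
    }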
void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM) {
// Don't use the canonical types. MSVC includes things like 'const' on
// pointer arguments to function pointers that canonicalization strips away.
T = T.getDesugaredType(getASTContext());
Qualifiers Quals = T.getLocalQualifiers();
+
if (const ArrayType *AT = getASTContext().getAsArrayType(T)) {
// If there were any Quals, getAsArrayType() pushed them onto the array
// element type.
@@ -1884,13 +2018,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId:
- mangleArtificalTagType(TTK_Struct, "objc_object");
+ mangleArtificialTagType(TTK_Struct, "objc_object");
break;
case BuiltinType::ObjCClass:
- mangleArtificalTagType(TTK_Struct, "objc_class");
+ mangleArtificialTagType(TTK_Struct, "objc_class");
break;
case BuiltinType::ObjCSel:
- mangleArtificalTagType(TTK_Struct, "objc_selector");
+ mangleArtificialTagType(TTK_Struct, "objc_selector");
break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -1900,35 +2034,40 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_sampler");
+ mangleArtificialTagType(TTK_Struct, "ocl_sampler");
break;
case BuiltinType::OCLEvent:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_event");
+ mangleArtificialTagType(TTK_Struct, "ocl_event");
break;
case BuiltinType::OCLClkEvent:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_clkevent");
+ mangleArtificialTagType(TTK_Struct, "ocl_clkevent");
break;
case BuiltinType::OCLQueue:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_queue");
+ mangleArtificialTagType(TTK_Struct, "ocl_queue");
break;
case BuiltinType::OCLReserveID:
Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_reserveid");
+ mangleArtificialTagType(TTK_Struct, "ocl_reserveid");
+ break;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ mangleArtificialTagType(TTK_Struct, "ocl_" #ExtType); \
break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::NullPtr:
Out << "$$T";
break;
case BuiltinType::Float16:
- mangleArtificalTagType(TTK_Struct, "_Float16", {"__clang"});
+ mangleArtificialTagType(TTK_Struct, "_Float16", {"__clang"});
break;
case BuiltinType::Half:
- mangleArtificalTagType(TTK_Struct, "_Half", {"__clang"});
+ mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"});
break;
case BuiltinType::ShortAccum:
@@ -1989,7 +2128,8 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T,
void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
const FunctionDecl *D,
- bool ForceThisQuals) {
+ bool ForceThisQuals,
+ bool MangleExceptionSpec) {
// <function-type> ::= <this-cvr-qualifiers> <calling-convention>
// <return-type> <argument-list> <throw-spec>
const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(T);
@@ -2021,7 +2161,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// If this is a C++ instance method, mangle the CVR qualifiers for the
// this pointer.
if (HasThisQuals) {
- Qualifiers Quals = Qualifiers::fromCVRUMask(Proto->getTypeQuals());
+ Qualifiers Quals = Proto->getTypeQuals();
manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType());
mangleRefQualifier(Proto->getRefQualifier());
mangleQualifiers(Quals, /*IsMember=*/false);
@@ -2122,7 +2262,12 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
Out << '@';
}
- mangleThrowSpecification(Proto);
+ if (MangleExceptionSpec && getASTContext().getLangOpts().CPlusPlus17 &&
+ getASTContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2017_5))
+ mangleThrowSpecification(Proto);
+ else
+ Out << 'Z';
}
void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
@@ -2227,15 +2372,15 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
mangleCallingConvention(T->getCallConv());
}
+
void MicrosoftCXXNameMangler::mangleThrowSpecification(
const FunctionProtoType *FT) {
- // <throw-spec> ::= Z # throw(...) (default)
- // ::= @ # throw() or __declspec/__attribute__((nothrow))
- // ::= <type>+
- // NOTE: Since the Microsoft compiler ignores throw specifications, they are
- // all actually mangled as 'Z'. (They're ignored because their associated
- // functionality isn't implemented, and probably never will be.)
- Out << 'Z';
+ // <throw-spec> ::= Z # (default)
+ // ::= _E # noexcept
+ if (FT->canThrow())
+ Out << 'Z';
+ else
+ Out << "_E";
}
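Aside (not part of the patch): with this change only a provably non-throwing function type gets the new _E marker; everything else keeps the legacy Z. A trivial sketch of that choice, with a plain bool standing in for FunctionProtoType::canThrow():

    #include <iostream>
    #include <string>

    // Pick the <throw-spec> piece of the mangling: 'Z' by default, "_E" when
    // the function type cannot throw (noexcept, under the C++17 + MSVC2017.5
    // compatibility gate applied earlier in mangleFunctionType).
    static std::string throwSpec(bool CanThrow) { return CanThrow ? "Z" : "_E"; }

    int main() {
      std::cout << "void f();          -> " << throwSpec(true) << "\n";
      std::cout << "void f() noexcept; -> " << throwSpec(false) << "\n";
    }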
void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
@@ -2285,7 +2430,7 @@ void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) {
}
// If you add a call to this, consider updating isArtificialTagType() too.
-void MicrosoftCXXNameMangler::mangleArtificalTagType(
+void MicrosoftCXXNameMangler::mangleArtificialTagType(
TagTypeKind TK, StringRef UnqualifiedName,
ArrayRef<StringRef> NestedNames) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
@@ -2416,7 +2561,11 @@ void MicrosoftCXXNameMangler::mangleType(const PointerType *T, Qualifiers Quals,
QualType PointeeType = T->getPointeeType();
manglePointerCVQualifiers(Quals);
manglePointerExtQualifiers(Quals, PointeeType);
- mangleType(PointeeType, Range);
+
+ if (PointeeType.getQualifiers().hasAddressSpace())
+ mangleAddressSpaceType(PointeeType, PointeeType.getQualifiers(), Range);
+ else
+ mangleType(PointeeType, Range);
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
@@ -2471,10 +2620,10 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
Extra.mangleSourceName("_Complex");
Extra.mangleType(ElementType, Range, QMM_Escape);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
-// Returns true for types that mangleArtificalTagType() gets called for with
+// Returns true for types that mangleArtificialTagType() gets called for with
// TTK_Union, TTK_Struct, TTK_Class and where compatibility with MSVC's
// mangling matters.
// (It doesn't matter for Objective-C types and the like that cl.exe doesn't
@@ -2487,7 +2636,7 @@ bool MicrosoftCXXNameMangler::isArtificialTagType(QualType T) const {
case Type::Vector: {
// For ABI compatibility only __m64, __m128(id), and __m256(id) matter,
- // but since mangleType(VectorType*) always calls mangleArtificalTagType()
+ // but since mangleType(VectorType*) always calls mangleArtificialTagType()
// just always return true (the other vector types are clang-only).
return true;
}
@@ -2502,18 +2651,20 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
// Pattern match exactly the typedefs in our intrinsic headers. Anything that
// doesn't match the Intel types uses a custom mangling below.
size_t OutSizeBefore = Out.tell();
- llvm::Triple::ArchType AT =
- getASTContext().getTargetInfo().getTriple().getArch();
- if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
- if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
- mangleArtificalTagType(TTK_Union, "__m64");
- } else if (Width >= 128) {
- if (ET->getKind() == BuiltinType::Float)
- mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width));
- else if (ET->getKind() == BuiltinType::LongLong)
- mangleArtificalTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
- else if (ET->getKind() == BuiltinType::Double)
- mangleArtificalTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ if (!isa<ExtVectorType>(T)) {
+ llvm::Triple::ArchType AT =
+ getASTContext().getTargetInfo().getTriple().getArch();
+ if (AT == llvm::Triple::x86 || AT == llvm::Triple::x86_64) {
+ if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
+ mangleArtificialTagType(TTK_Union, "__m64");
+ } else if (Width >= 128) {
+ if (ET->getKind() == BuiltinType::Float)
+ mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width));
+ else if (ET->getKind() == BuiltinType::LongLong)
+ mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
+ else if (ET->getKind() == BuiltinType::Double)
+ mangleArtificialTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ }
}
}
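Aside (not part of the patch): the pattern match above covers only the Intel intrinsic typedefs on x86/x86-64 and is now skipped entirely for ext_vector_type vectors. A sketch of the names it reproduces for those typedefs, under the stated assumption that only float, long long, and double elements at width 64 or >= 128 matter:

    #include <iostream>
    #include <string>

    enum class Elt { Float, LongLong, Double };

    // Returns the MSVC intrinsic union/struct name for a vector type, or ""
    // when the custom template mangling in the __clang namespace is used.
    static std::string intelVectorName(unsigned WidthBits, Elt Kind) {
      if (WidthBits == 64 && Kind == Elt::LongLong)
        return "__m64";
      if (WidthBits >= 128) {
        if (Kind == Elt::Float)
          return "__m" + std::to_string(WidthBits);        // __m128, __m256
        if (Kind == Elt::LongLong)
          return "__m" + std::to_string(WidthBits) + "i";  // __m128i, __m256i
        if (Kind == Elt::Double)
          return "__m" + std::to_string(WidthBits) + "d";  // __m128d, __m256d
      }
      return "";  // falls back to the clang-specific vector mangling
    }

    int main() {
      std::cout << intelVectorName(128, Elt::Float) << " "
                << intelVectorName(256, Elt::LongLong) << " "
                << intelVectorName(64, Elt::LongLong) << "\n";  // __m128 __m256i __m64
    }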
@@ -2532,7 +2683,7 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()),
/*IsBoolean=*/false);
- mangleArtificalTagType(TTK_Union, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Union, TemplateMangling, {"__clang"});
}
}
@@ -2575,9 +2726,12 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
mangleName(T->getDecl());
}
-void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
- SourceRange Range) {
- if (T->qual_empty())
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
+ Qualifiers Quals, SourceRange Range) {
+ if (T->isKindOfType())
+ return mangleObjCKindOfType(T, Quals, Range);
+
+ if (T->qual_empty() && !T->isSpecialized())
return mangleType(T->getBaseType(), Range, QMM_Drop);
ArgBackRefMap OuterArgsContext;
@@ -2598,6 +2752,11 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
for (const auto &Q : T->quals())
mangleObjCProtocol(Q);
+
+ if (T->isSpecialized())
+ for (const auto &TA : T->getTypeArgs())
+ mangleType(TA, Range, QMM_Drop);
+
Out << '@';
Out << '@';
@@ -2728,7 +2887,7 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
Extra.mangleSourceName("_Atomic");
Extra.mangleType(ValueType, Range, QMM_Escape);
- mangleArtificalTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
@@ -2780,14 +2939,14 @@ void MicrosoftMangleContextImpl::mangleCXXName(const NamedDecl *D,
// <vtordisp-shift> ::= <offset-to-vtordisp>
// <vtordispex-shift> ::= <offset-to-vbptr> <vbase-offset-offset>
// <offset-to-vtordisp>
-static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
+static void mangleThunkThisAdjustment(AccessSpecifier AS,
const ThisAdjustment &Adjustment,
MicrosoftCXXNameMangler &Mangler,
raw_ostream &Out) {
if (!Adjustment.Virtual.isEmpty()) {
Out << '$';
char AccessSpec;
- switch (MD->getAccess()) {
+ switch (AS) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
@@ -2815,7 +2974,7 @@ static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
}
} else if (Adjustment.NonVirtual != 0) {
- switch (MD->getAccess()) {
+ switch (AS) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
@@ -2829,7 +2988,7 @@ static void mangleThunkThisAdjustment(const CXXMethodDecl *MD,
}
Mangler.mangleNumber(-static_cast<uint32_t>(Adjustment.NonVirtual));
} else {
- switch (MD->getAccess()) {
+ switch (AS) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
@@ -2860,7 +3019,13 @@ void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
MicrosoftCXXNameMangler Mangler(*this, MHO);
Mangler.getStream() << '?';
Mangler.mangleName(MD);
- mangleThunkThisAdjustment(MD, Thunk.This, Mangler, MHO);
+
+ // Usually the thunk uses the access specifier of the new method, but if this
+ // is a covariant return thunk, then MSVC always uses the public access
+ // specifier, and we do the same.
+ AccessSpecifier AS = Thunk.Return.isEmpty() ? MD->getAccess() : AS_public;
+ mangleThunkThisAdjustment(AS, Thunk.This, Mangler, MHO);
+
if (!Thunk.Return.isEmpty())
assert(Thunk.Method != nullptr &&
"Thunk info should hold the overridee decl");
@@ -2881,7 +3046,7 @@ void MicrosoftMangleContextImpl::mangleCXXDtorThunk(
MicrosoftCXXNameMangler Mangler(*this, MHO, DD, Type);
Mangler.getStream() << "??_E";
Mangler.mangleName(DD->getParent());
- mangleThunkThisAdjustment(DD, Adjustment, Mangler, MHO);
+ mangleThunkThisAdjustment(DD->getAccess(), Adjustment, Mangler, MHO);
Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD);
}
@@ -3175,10 +3340,13 @@ void MicrosoftMangleContextImpl::mangleInitFiniStub(const VarDecl *D,
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
Mangler.getStream() << "??__" << CharCode;
- Mangler.mangleName(D);
if (D->isStaticDataMember()) {
+ Mangler.getStream() << '?';
+ Mangler.mangleName(D);
Mangler.mangleVariableEncoding(D);
- Mangler.getStream() << '@';
+ Mangler.getStream() << "@@";
+ } else {
+ Mangler.mangleName(D);
}
// This is the function class mangling. These stubs are global, non-variadic,
// cdecl functions that return void and take no args.
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index 94ad87b6e337..5b8300893e2d 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -475,6 +475,9 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -509,7 +512,7 @@ bool NSAPI::isObjCNSUIntegerType(QualType T) const {
}
StringRef NSAPI::GetNSIntegralKind(QualType T) const {
- if (!Ctx.getLangOpts().ObjC1 || T.isNull())
+ if (!Ctx.getLangOpts().ObjC || T.isNull())
return StringRef();
while (const TypedefType *TDT = T->getAs<TypedefType>()) {
@@ -561,7 +564,7 @@ bool NSAPI::isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
bool NSAPI::isObjCTypedef(QualType T,
StringRef name, IdentifierInfo *&II) const {
- if (!Ctx.getLangOpts().ObjC1)
+ if (!Ctx.getLangOpts().ObjC)
return false;
if (T.isNull())
return false;
@@ -580,7 +583,7 @@ bool NSAPI::isObjCTypedef(QualType T,
bool NSAPI::isObjCEnumerator(const Expr *E,
StringRef name, IdentifierInfo *&II) const {
- if (!Ctx.getLangOpts().ObjC1)
+ if (!Ctx.getLangOpts().ObjC)
return false;
if (!E)
return false;
@@ -607,3 +610,11 @@ Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
}
return Sel;
}
+
+Selector NSAPI::getOrInitNullarySelector(StringRef Id, Selector &Sel) const {
+ if (Sel.isNull()) {
+ IdentifierInfo *Ident = &Ctx.Idents.get(Id);
+ Sel = Ctx.Selectors.getSelector(0, &Ident);
+ }
+ return Sel;
+}
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index a514d57b6b6b..42f6a133d717 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
@@ -270,9 +271,8 @@ bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
/// Print this nested name specifier to the given output
/// stream.
-void
-NestedNameSpecifier::print(raw_ostream &OS,
- const PrintingPolicy &Policy) const {
+void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool ResolveTemplateArguments) const {
if (getPrefix())
getPrefix()->print(OS, Policy);
@@ -305,6 +305,15 @@ NestedNameSpecifier::print(raw_ostream &OS,
LLVM_FALLTHROUGH;
case TypeSpec: {
+ const auto *Record =
+ dyn_cast_or_null<ClassTemplateSpecializationDecl>(getAsRecordDecl());
+ if (ResolveTemplateArguments && Record) {
+ // Print the type trait with resolved template parameters.
+ Record->printName(OS);
+ printTemplateArgumentList(OS, Record->getTemplateArgs().asArray(),
+ Policy);
+ break;
+ }
const Type *T = getAsType();
PrintingPolicy InnerPolicy(Policy);
@@ -339,13 +348,20 @@ NestedNameSpecifier::print(raw_ostream &OS,
OS << "::";
}
-void NestedNameSpecifier::dump(const LangOptions &LO) const {
- print(llvm::errs(), PrintingPolicy(LO));
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(const LangOptions &LO) const {
+ dump(llvm::errs(), LO);
}
-LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const {
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS) const {
LangOptions LO;
- print(llvm::errs(), PrintingPolicy(LO));
+ dump(OS, LO);
+}
+
+LLVM_DUMP_METHOD void NestedNameSpecifier::dump(llvm::raw_ostream &OS,
+ const LangOptions &LO) const {
+ print(OS, PrintingPolicy(LO));
}
unsigned
@@ -446,9 +462,9 @@ SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
}
TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
- assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec ||
- Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) &&
- "Nested-name-specifier location is not a type");
+ if (Qualifier->getKind() != NestedNameSpecifier::TypeSpec &&
+ Qualifier->getKind() != NestedNameSpecifier::TypeSpecWithTemplate)
+ return TypeLoc();
// The "void*" that points at the TypeLoc data.
unsigned Offset = getDataLength(Qualifier->getPrefix());
@@ -547,6 +563,7 @@ operator=(const NestedNameSpecifierLocBuilder &Other) {
}
// Deep copy.
+ BufferSize = 0;
Append(Other.Buffer, Other.Buffer + Other.BufferSize, Buffer, BufferSize,
BufferCapacity);
return *this;
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
index e710d3780337..a4c344ce0a9b 100644
--- a/lib/AST/ODRHash.cpp
+++ b/lib/AST/ODRHash.cpp
@@ -32,12 +32,24 @@ void ODRHash::AddIdentifierInfo(const IdentifierInfo *II) {
ID.AddString(II->getName());
}
-void ODRHash::AddDeclarationName(DeclarationName Name) {
+void ODRHash::AddDeclarationName(DeclarationName Name, bool TreatAsDecl) {
+ if (TreatAsDecl)
+ // Matches the NamedDecl check in AddDecl
+ AddBoolean(true);
+
+ AddDeclarationNameImpl(Name);
+
+ if (TreatAsDecl)
+ // Matches the ClassTemplateSpecializationDecl check in AddDecl
+ AddBoolean(false);
+}
+
+void ODRHash::AddDeclarationNameImpl(DeclarationName Name) {
// Index all DeclarationName and use index numbers to refer to them.
auto Result = DeclNameMap.insert(std::make_pair(Name, DeclNameMap.size()));
ID.AddInteger(Result.first->second);
if (!Result.second) {
- // If found in map, the the DeclarationName has previously been processed.
+ // If found in map, the DeclarationName has previously been processed.
return;
}
@@ -405,6 +417,7 @@ public:
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
AddDecl(D->getTemplatedDecl());
+ ID.AddInteger(D->getTemplatedDecl()->getODRHash());
Inherited::VisitFunctionTemplateDecl(D);
}
@@ -552,11 +565,27 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
!Function->isDefaulted() && !Function->isDeleted() &&
!Function->isLateTemplateParsed();
AddBoolean(HasBody);
- if (HasBody) {
- auto *Body = Function->getBody();
- AddBoolean(Body);
- if (Body)
- AddStmt(Body);
+ if (!HasBody) {
+ return;
+ }
+
+ auto *Body = Function->getBody();
+ AddBoolean(Body);
+ if (Body)
+ AddStmt(Body);
+
+ // Filter out sub-Decls which will not be processed in order to get an
+  // accurate count of Decls.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : Function->decls()) {
+ if (isWhitelistedDecl(SubDecl, Function)) {
+ Decls.push_back(SubDecl);
+ }
+ }
+
+ ID.AddInteger(Decls.size());
+ for (auto SubDecl : Decls) {
+ AddSubDecl(SubDecl);
}
}
@@ -592,13 +621,24 @@ void ODRHash::AddDecl(const Decl *D) {
assert(D && "Expecting non-null pointer.");
D = D->getCanonicalDecl();
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- AddDeclarationName(ND->getDeclName());
+ const NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ AddBoolean(ND);
+ if (!ND) {
+ ID.AddInteger(D->getKind());
return;
}
- ID.AddInteger(D->getKind());
- // TODO: Handle non-NamedDecl here.
+ AddDeclarationName(ND->getDeclName());
+
+ const auto *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(D);
+ AddBoolean(Specialization);
+ if (Specialization) {
+ const TemplateArgumentList &List = Specialization->getTemplateArgs();
+ ID.AddInteger(List.size());
+ for (const TemplateArgument &TA : List.asArray())
+ AddTemplateArgument(TA);
+ }
}
namespace {
@@ -700,11 +740,67 @@ public:
VisitArrayType(T);
}
+ void VisitAttributedType(const AttributedType *T) {
+ ID.AddInteger(T->getAttrKind());
+ AddQualType(T->getModifiedType());
+ AddQualType(T->getEquivalentType());
+
+ VisitType(T);
+ }
+
+ void VisitBlockPointerType(const BlockPointerType *T) {
+ AddQualType(T->getPointeeType());
+ VisitType(T);
+ }
+
void VisitBuiltinType(const BuiltinType *T) {
ID.AddInteger(T->getKind());
VisitType(T);
}
+ void VisitComplexType(const ComplexType *T) {
+ AddQualType(T->getElementType());
+ VisitType(T);
+ }
+
+ void VisitDecltypeType(const DecltypeType *T) {
+ AddStmt(T->getUnderlyingExpr());
+ AddQualType(T->getUnderlyingType());
+ VisitType(T);
+ }
+
+ void VisitDependentDecltypeType(const DependentDecltypeType *T) {
+ VisitDecltypeType(T);
+ }
+
+ void VisitDeducedType(const DeducedType *T) {
+ AddQualType(T->getDeducedType());
+ VisitType(T);
+ }
+
+ void VisitAutoType(const AutoType *T) {
+ ID.AddInteger((unsigned)T->getKeyword());
+ VisitDeducedType(T);
+ }
+
+ void VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T) {
+ Hash.AddTemplateName(T->getTemplateName());
+ VisitDeducedType(T);
+ }
+
+ void VisitDependentAddressSpaceType(const DependentAddressSpaceType *T) {
+ AddQualType(T->getPointeeType());
+ AddStmt(T->getAddrSpaceExpr());
+ VisitType(T);
+ }
+
+ void VisitDependentSizedExtVectorType(const DependentSizedExtVectorType *T) {
+ AddQualType(T->getElementType());
+ AddStmt(T->getSizeExpr());
+ VisitType(T);
+ }
+
void VisitFunctionType(const FunctionType *T) {
AddQualType(T->getReturnType());
T->getExtInfo().Profile(ID);
@@ -726,6 +822,74 @@ public:
VisitFunctionType(T);
}
+ void VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ AddDecl(T->getDecl());
+ VisitType(T);
+ }
+
+ void VisitMemberPointerType(const MemberPointerType *T) {
+ AddQualType(T->getPointeeType());
+ AddType(T->getClass());
+ VisitType(T);
+ }
+
+ void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ AddQualType(T->getPointeeType());
+ VisitType(T);
+ }
+
+ void VisitObjCObjectType(const ObjCObjectType *T) {
+ AddDecl(T->getInterface());
+
+ auto TypeArgs = T->getTypeArgsAsWritten();
+ ID.AddInteger(TypeArgs.size());
+ for (auto Arg : TypeArgs) {
+ AddQualType(Arg);
+ }
+
+ auto Protocols = T->getProtocols();
+ ID.AddInteger(Protocols.size());
+ for (auto Protocol : Protocols) {
+ AddDecl(Protocol);
+ }
+
+ Hash.AddBoolean(T->isKindOfType());
+
+ VisitType(T);
+ }
+
+ void VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ // This type is handled by the parent type ObjCObjectType.
+ VisitObjCObjectType(T);
+ }
+
+ void VisitObjCTypeParamType(const ObjCTypeParamType *T) {
+ AddDecl(T->getDecl());
+ auto Protocols = T->getProtocols();
+ ID.AddInteger(Protocols.size());
+ for (auto Protocol : Protocols) {
+ AddDecl(Protocol);
+ }
+
+ VisitType(T);
+ }
+
+ void VisitPackExpansionType(const PackExpansionType *T) {
+ AddQualType(T->getPattern());
+ VisitType(T);
+ }
+
+ void VisitParenType(const ParenType *T) {
+ AddQualType(T->getInnerType());
+ VisitType(T);
+ }
+
+ void VisitPipeType(const PipeType *T) {
+ AddQualType(T->getElementType());
+ Hash.AddBoolean(T->isReadOnly());
+ VisitType(T);
+ }
+
void VisitPointerType(const PointerType *T) {
AddQualType(T->getPointeeType());
VisitType(T);
@@ -744,6 +908,43 @@ public:
VisitReferenceType(T);
}
+ void
+ VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
+ AddType(T->getReplacedParameter());
+ Hash.AddTemplateArgument(T->getArgumentPack());
+ VisitType(T);
+ }
+
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ AddType(T->getReplacedParameter());
+ AddQualType(T->getReplacementType());
+ VisitType(T);
+ }
+
+ void VisitTagType(const TagType *T) {
+ AddDecl(T->getDecl());
+ VisitType(T);
+ }
+
+ void VisitRecordType(const RecordType *T) { VisitTagType(T); }
+ void VisitEnumType(const EnumType *T) { VisitTagType(T); }
+
+ void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ ID.AddInteger(T->getNumArgs());
+ for (const auto &TA : T->template_arguments()) {
+ Hash.AddTemplateArgument(TA);
+ }
+ Hash.AddTemplateName(T->getTemplateName());
+ VisitType(T);
+ }
+
+ void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ ID.AddInteger(T->getDepth());
+ ID.AddInteger(T->getIndex());
+ Hash.AddBoolean(T->isParameterPack());
+ AddDecl(T->getDecl());
+ }
+
void VisitTypedefType(const TypedefType *T) {
AddDecl(T->getDecl());
QualType UnderlyingType = T->getDecl()->getUnderlyingType();
@@ -766,13 +967,18 @@ public:
VisitType(T);
}
- void VisitTagType(const TagType *T) {
- AddDecl(T->getDecl());
+ void VisitTypeOfExprType(const TypeOfExprType *T) {
+ AddStmt(T->getUnderlyingExpr());
+ Hash.AddBoolean(T->isSugared());
+ if (T->isSugared())
+ AddQualType(T->desugar());
+
+ VisitType(T);
+ }
+ void VisitTypeOfType(const TypeOfType *T) {
+ AddQualType(T->getUnderlyingType());
VisitType(T);
}
-
- void VisitRecordType(const RecordType *T) { VisitTagType(T); }
- void VisitEnumType(const EnumType *T) { VisitTagType(T); }
void VisitTypeWithKeyword(const TypeWithKeyword *T) {
ID.AddInteger(T->getKeyword());
@@ -802,20 +1008,26 @@ public:
VisitTypeWithKeyword(T);
}
- void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
- ID.AddInteger(T->getNumArgs());
- for (const auto &TA : T->template_arguments()) {
- Hash.AddTemplateArgument(TA);
- }
- Hash.AddTemplateName(T->getTemplateName());
+ void VisitUnaryTransformType(const UnaryTransformType *T) {
+ AddQualType(T->getUnderlyingType());
+ AddQualType(T->getBaseType());
VisitType(T);
}
- void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
- ID.AddInteger(T->getDepth());
- ID.AddInteger(T->getIndex());
- Hash.AddBoolean(T->isParameterPack());
+ void VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
AddDecl(T->getDecl());
+ VisitType(T);
+ }
+
+ void VisitVectorType(const VectorType *T) {
+ AddQualType(T->getElementType());
+ ID.AddInteger(T->getNumElements());
+ ID.AddInteger(T->getVectorKind());
+ VisitType(T);
+ }
+
+ void VisitExtVectorType(const ExtVectorType * T) {
+ VisitVectorType(T);
}
};
} // namespace
diff --git a/lib/Analysis/OSLog.cpp b/lib/AST/OSLog.cpp
index b2983932ea22..df2f808728cf 100644
--- a/lib/Analysis/OSLog.cpp
+++ b/lib/AST/OSLog.cpp
@@ -1,11 +1,11 @@
// TODO: header template
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/OSLog.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprObjC.h"
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -26,6 +26,7 @@ private:
Optional<const Expr *> Precision;
Optional<const Expr *> FieldWidth;
unsigned char Flags = 0;
+ StringRef MaskType;
};
SmallVector<ArgData, 4> ArgsData;
ArrayRef<const Expr *> Args;
@@ -120,18 +121,26 @@ public:
ArgsData.back().FieldWidth = Args[FS.getFieldWidth().getArgIndex()];
}
- if (FS.isPrivate()) {
+ if (FS.isSensitive())
+ ArgsData.back().Flags |= OSLogBufferItem::IsSensitive;
+ else if (FS.isPrivate())
ArgsData.back().Flags |= OSLogBufferItem::IsPrivate;
- }
- if (FS.isPublic()) {
+ else if (FS.isPublic())
ArgsData.back().Flags |= OSLogBufferItem::IsPublic;
- }
+
+ ArgsData.back().MaskType = FS.getMaskType();
return true;
}
void computeLayout(ASTContext &Ctx, OSLogBufferLayout &Layout) const {
Layout.Items.clear();
for (auto &Data : ArgsData) {
+ if (!Data.MaskType.empty()) {
+ CharUnits Size = CharUnits::fromQuantity(8);
+ Layout.Items.emplace_back(OSLogBufferItem::MaskKind, nullptr,
+ Size, 0, Data.MaskType);
+ }
+
if (Data.FieldWidth) {
CharUnits Size = Ctx.getTypeSizeInChars((*Data.FieldWidth)->getType());
Layout.Items.emplace_back(OSLogBufferItem::ScalarKind, *Data.FieldWidth,
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index 50729264bfe1..76098f15bf36 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
@@ -106,6 +107,11 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
break;
}
@@ -175,12 +181,68 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
break;
}
return nullptr;
}
+OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num,
+ unsigned NumLoops,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * NumLoops));
+ auto *Clause =
+ new (Mem) OMPOrderedClause(Num, NumLoops, StartLoc, LParenLoc, EndLoc);
+ for (unsigned I = 0; I < NumLoops; ++I) {
+ Clause->setLoopNumIterations(I, nullptr);
+ Clause->setLoopCounter(I, nullptr);
+ }
+ return Clause;
+}
+
+OMPOrderedClause *OMPOrderedClause::CreateEmpty(const ASTContext &C,
+ unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * NumLoops));
+ auto *Clause = new (Mem) OMPOrderedClause(NumLoops);
+ for (unsigned I = 0; I < NumLoops; ++I) {
+ Clause->setLoopNumIterations(I, nullptr);
+ Clause->setLoopCounter(I, nullptr);
+ }
+ return Clause;
+}
+
+void OMPOrderedClause::setLoopNumIterations(unsigned NumLoop,
+ Expr *NumIterations) {
+ assert(NumLoop < NumberOfLoops && "out of loops number.");
+ getTrailingObjects<Expr *>()[NumLoop] = NumIterations;
+}
+
+ArrayRef<Expr *> OMPOrderedClause::getLoopNumIterations() const {
+ return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops);
+}
+
+void OMPOrderedClause::setLoopCounter(unsigned NumLoop, Expr *Counter) {
+ assert(NumLoop < NumberOfLoops && "out of loops number.");
+ getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop] = Counter;
+}
+
+Expr *OMPOrderedClause::getLoopCounter(unsigned NumLoop) {
+ assert(NumLoop < NumberOfLoops && "out of loops number.");
+ return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
+}
+
+const Expr *OMPOrderedClause::getLoopCounter(unsigned NumLoop) const {
+ assert(NumLoop < NumberOfLoops && "out of loops number.");
+ return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
+}
+
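Aside (not part of the patch): the ordered clause now allocates 2 * NumLoops trailing expressions, with the first NumLoops slots holding the per-loop iteration counts and the next NumLoops slots holding the loop counters; that is the indexing the accessors above rely on. A tiny standalone sketch of the layout, with void* standing in for Expr* and a vector standing in for the trailing objects:

    #include <cassert>
    #include <vector>

    // Mimics the trailing-object layout: [0, N) = per-loop iteration counts,
    // [N, 2N) = per-loop counters.
    struct OrderedStorage {
      unsigned NumLoops;
      std::vector<void *> Slots;
      explicit OrderedStorage(unsigned N) : NumLoops(N), Slots(2 * N, nullptr) {}

      void setNumIterations(unsigned Loop, void *E) {
        assert(Loop < NumLoops && "out of range");
        Slots[Loop] = E;
      }
      void setCounter(unsigned Loop, void *E) {
        assert(Loop < NumLoops && "out of range");
        Slots[NumLoops + Loop] = E;
      }
    };

    int main() {
      OrderedStorage S(3);
      int Dummy;
      S.setCounter(2, &Dummy);        // lands in slot 3 + 2 == 5
      assert(S.Slots[5] == &Dummy);
    }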
void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
@@ -653,44 +715,58 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPFlushClause(N);
}
-OMPDependClause *OMPDependClause::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + 1));
- OMPDependClause *Clause =
- new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
+OMPDependClause *
+OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops));
+ OMPDependClause *Clause = new (Mem)
+ OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
Clause->setVarRefs(VL);
Clause->setDependencyKind(DepKind);
Clause->setDependencyLoc(DepLoc);
Clause->setColonLoc(ColonLoc);
- Clause->setCounterValue(nullptr);
+ for (unsigned I = 0 ; I < NumLoops; ++I)
+ Clause->setLoopData(I, nullptr);
return Clause;
}
-OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + 1));
- return new (Mem) OMPDependClause(N);
+OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N,
+ unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops));
+ return new (Mem) OMPDependClause(N, NumLoops);
}
-void OMPDependClause::setCounterValue(Expr *V) {
- assert(getDependencyKind() == OMPC_DEPEND_sink ||
- getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
- *getVarRefs().end() = V;
+void OMPDependClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
+ assert((getDependencyKind() == OMPC_DEPEND_sink ||
+ getDependencyKind() == OMPC_DEPEND_source) &&
+ NumLoop < NumLoops &&
+ "Expected sink or source depend + loop index must be less number of "
+ "loops.");
+ auto It = std::next(getVarRefs().end(), NumLoop);
+ *It = Cnt;
}
-const Expr *OMPDependClause::getCounterValue() const {
- auto *V = *getVarRefs().end();
- assert(getDependencyKind() == OMPC_DEPEND_sink ||
- getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
- return V;
+Expr *OMPDependClause::getLoopData(unsigned NumLoop) {
+ assert((getDependencyKind() == OMPC_DEPEND_sink ||
+ getDependencyKind() == OMPC_DEPEND_source) &&
+ NumLoop < NumLoops &&
+ "Expected sink or source depend + loop index must be less number of "
+ "loops.");
+ auto It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
}
-Expr *OMPDependClause::getCounterValue() {
- auto *V = *getVarRefs().end();
- assert(getDependencyKind() == OMPC_DEPEND_sink ||
- getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
- return V;
+const Expr *OMPDependClause::getLoopData(unsigned NumLoop) const {
+ assert((getDependencyKind() == OMPC_DEPEND_sink ||
+ getDependencyKind() == OMPC_DEPEND_source) &&
+ NumLoop < NumLoops &&
+ "Expected sink or source depend + loop index must be less number of "
+ "loops.");
+ auto It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
}
unsigned OMPClauseMappableExprCommon::getComponentsTotalNumber(
@@ -720,8 +796,10 @@ OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
- OpenMPMapClauseKind TypeModifier, OpenMPMapClauseKind Type,
- bool TypeIsImplicit, SourceLocation TypeLoc) {
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ ArrayRef<SourceLocation> MapModifiersLoc,
+ OpenMPMapClauseKind Type, bool TypeIsImplicit,
+ SourceLocation TypeLoc) {
unsigned NumVars = Vars.size();
unsigned NumUniqueDeclarations =
getUniqueDeclarationsTotalNumber(Declarations);
@@ -744,12 +822,12 @@ OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
NumVars, NumUniqueDeclarations,
NumUniqueDeclarations + NumComponentLists, NumComponents));
OMPMapClause *Clause = new (Mem) OMPMapClause(
- TypeModifier, Type, TypeIsImplicit, TypeLoc, StartLoc, LParenLoc, EndLoc,
- NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents);
+ MapModifiers, MapModifiersLoc, Type, TypeIsImplicit, TypeLoc, StartLoc,
+ LParenLoc, EndLoc, NumVars, NumUniqueDeclarations, NumComponentLists,
+ NumComponents);
Clause->setVarRefs(Vars);
Clause->setClauseInfo(Declarations, ComponentLists);
- Clause->setMapTypeModifier(TypeModifier);
Clause->setMapType(Type);
Clause->setMapLoc(TypeLoc);
return Clause;
@@ -976,3 +1054,444 @@ OMPIsDevicePtrClause *OMPIsDevicePtrClause::CreateEmpty(
return new (Mem) OMPIsDevicePtrClause(NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents);
}
+
+//===----------------------------------------------------------------------===//
+// OpenMP clauses printing methods
+//===----------------------------------------------------------------------===//
+
+void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
+ OS << "if(";
+ if (Node->getNameModifier() != OMPD_unknown)
+ OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
+ OS << "final(";
+ Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
+ OS << "num_threads(";
+ Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
+ OS << "safelen(";
+ Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
+ OS << "simdlen(";
+ Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
+ OS << "collapse(";
+ Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
+ OS << "default("
+ << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
+ OS << "proc_bind("
+ << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {
+ OS << "unified_address";
+}
+
+void OMPClausePrinter::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {
+ OS << "unified_shared_memory";
+}
+
+void OMPClausePrinter::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {
+ OS << "reverse_offload";
+}
+
+void OMPClausePrinter::VisitOMPDynamicAllocatorsClause(
+ OMPDynamicAllocatorsClause *) {
+ OS << "dynamic_allocators";
+}
+
+void OMPClausePrinter::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *Node) {
+ OS << "atomic_default_mem_order("
+ << getOpenMPSimpleClauseTypeName(OMPC_atomic_default_mem_order,
+ Node->getAtomicDefaultMemOrderKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
+ OS << "schedule(";
+ if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getFirstScheduleModifier());
+ if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
+ OS << ", ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
+ Node->getSecondScheduleModifier());
+ }
+ OS << ": ";
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
+ if (auto *E = Node->getChunkSize()) {
+ OS << ", ";
+ E->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
+ OS << "ordered";
+ if (auto *Num = Node->getNumForLoops()) {
+ OS << "(";
+ Num->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
+ OS << "nowait";
+}
+
+void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
+ OS << "untied";
+}
+
+void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
+ OS << "nogroup";
+}
+
+void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
+ OS << "mergeable";
+}
+
+void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
+
+void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
+
+void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
+ OS << "update";
+}
+
+void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
+ OS << "capture";
+}
+
+void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
+ OS << "seq_cst";
+}
+
+void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
+ OS << "threads";
+}
+
+void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
+
+void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
+ OS << "device(";
+ Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
+ OS << "num_teams(";
+ Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
+ OS << "thread_limit(";
+ Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
+ OS << "priority(";
+ Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
+ OS << "grainsize(";
+ Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
+ OS << "num_tasks(";
+ Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
+ OS << "hint(";
+ Node->getHint()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+template<typename T>
+void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
+ for (typename T::varlist_iterator I = Node->varlist_begin(),
+ E = Node->varlist_end();
+ I != E; ++I) {
+ assert(*I && "Expected non-null Stmt");
+ OS << (I == Node->varlist_begin() ? StartSym : ',');
+ if (auto *DRE = dyn_cast<DeclRefExpr>(*I)) {
+ if (isa<OMPCapturedExprDecl>(DRE->getDecl()))
+ DRE->printPretty(OS, nullptr, Policy, 0);
+ else
+ DRE->getDecl()->printQualifiedName(OS);
+ } else
+ (*I)->printPretty(OS, nullptr, Policy, 0);
+ }
+}
+
+void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "private";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "firstprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "lastprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "shared";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPTaskReductionClause(
+ OMPTaskReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "task_reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "in_reduction(";
+ NestedNameSpecifier *QualifierLoc =
+ Node->getQualifierLoc().getNestedNameSpecifier();
+ OverloadedOperatorKind OOK =
+ Node->getNameInfo().getName().getCXXOverloadedOperator();
+ if (QualifierLoc == nullptr && OOK != OO_None) {
+ // Print reduction identifier in C format
+ OS << getOperatorSpelling(OOK);
+ } else {
+ // Use C++ format
+ if (QualifierLoc != nullptr)
+ QualifierLoc->print(OS, Policy);
+ OS << Node->getNameInfo();
+ }
+ OS << ":";
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "linear";
+ if (Node->getModifierLoc().isValid()) {
+ OS << '('
+ << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
+ }
+ VisitOMPClauseList(Node, '(');
+ if (Node->getModifierLoc().isValid())
+ OS << ')';
+ if (Node->getStep() != nullptr) {
+ OS << ": ";
+ Node->getStep()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "aligned";
+ VisitOMPClauseList(Node, '(');
+ if (Node->getAlignment() != nullptr) {
+ OS << ": ";
+ Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
+ }
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyin";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "copyprivate";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
+ if (!Node->varlist_empty()) {
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
+ OS << "depend(";
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
+ Node->getDependencyKind());
+ if (!Node->varlist_empty()) {
+ OS << " :";
+ VisitOMPClauseList(Node, ' ');
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "map(";
+ if (Node->getMapType() != OMPC_MAP_unknown) {
+ for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map,
+ Node->getMapTypeModifier(I));
+ OS << ',';
+ }
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
+ OS << ':';
+ }
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "to";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "from";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPDistScheduleClause(OMPDistScheduleClause *Node) {
+ OS << "dist_schedule(" << getOpenMPSimpleClauseTypeName(
+ OMPC_dist_schedule, Node->getDistScheduleKind());
+ if (auto *E = Node->getChunkSize()) {
+ OS << ", ";
+ E->printPretty(OS, nullptr, Policy);
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
+ OS << "defaultmap(";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapModifier());
+ OS << ": ";
+ OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
+ Node->getDefaultmapKind());
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "use_device_ptr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "is_device_ptr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
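
// A minimal standalone sketch of the text produced by VisitOMPMapClause above for a
// clause with map-type modifiers: each known modifier is printed followed by ',', then
// the map type and ':', then the variable list with a leading ' ' and ',' separators.
// The spellings below ("always", "close", "to") are assumed for illustration only.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Modifiers = {"always", "close"};
  std::string MapType = "to";
  std::vector<std::string> Vars = {"a", "b"};

  std::string Out = "map(";
  for (const std::string &M : Modifiers)
    Out += M + ",";                        // modifier list, comma-terminated
  Out += MapType + ":";                    // map type followed by ':'
  for (std::size_t I = 0; I < Vars.size(); ++I)
    Out += (I == 0 ? " " : ",") + Vars[I]; // ' ' before the first var, ',' between
  Out += ")";

  std::cout << Out << "\n"; // map(always,close,to: a,b)
  return 0;
}
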
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
index af2a480dc23f..88c178aa372f 100644
--- a/lib/AST/ParentMap.cpp
+++ b/lib/AST/ParentMap.cpp
@@ -163,7 +163,7 @@ bool ParentMap::isConsumedExpr(Expr* E) const {
// Ignore parents that don't guarantee consumption.
while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P) ||
- isa<ExprWithCleanups>(P))) {
+ isa<FullExpr>(P))) {
DirectChild = P;
P = getParent(P);
}
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/AST/PrintfFormatString.cpp
index dcb15c5e3758..e0a0c5b7582a 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/AST/PrintfFormatString.cpp
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/Analyses/FormatString.h"
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/FormatString.h"
+#include "clang/AST/OSLog.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
@@ -127,7 +127,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
do {
StringRef Str(I, E - I);
- std::string Match = "^[\t\n\v\f\r ]*(private|public)[\t\n\v\f\r ]*(,|})";
+ std::string Match = "^[[:space:]]*"
+ "(private|public|sensitive|mask\\.[^[:space:],}]*)"
+ "[[:space:]]*(,|})";
llvm::Regex R(Match);
SmallVector<StringRef, 2> Matches;
@@ -138,7 +140,17 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Set the privacy flag if the privacy annotation in the
// comma-delimited segment is at least as strict as the privacy
// annotations in previous comma-delimited segments.
- if (MatchedStr.equals("private"))
+ if (MatchedStr.startswith("mask")) {
+ StringRef MaskType = MatchedStr.substr(sizeof("mask.") - 1);
+ unsigned Size = MaskType.size();
+ if (Warn && (Size == 0 || Size > 8))
+ H.handleInvalidMaskType(MaskType);
+ FS.setMaskType(MaskType);
+ } else if (MatchedStr.equals("sensitive"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsSensitive;
+ else if (PrivacyFlags !=
+ clang::analyze_os_log::OSLogBufferItem::IsSensitive &&
+ MatchedStr.equals("private"))
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate;
else if (PrivacyFlags == 0 && MatchedStr.equals("public"))
PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic;
@@ -168,6 +180,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case clang::analyze_os_log::OSLogBufferItem::IsPublic:
FS.setIsPublic(MatchedStr.data());
break;
+ case clang::analyze_os_log::OSLogBufferItem::IsSensitive:
+ FS.setIsSensitive(MatchedStr.data());
+ break;
default:
llvm_unreachable("Unexpected privacy flag value");
}
@@ -232,6 +247,9 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
}
}
+ if (ParseVectorModifier(H, FS, I, E, LO))
+ return true;
+
// Look for the length modifier.
if (ParseLengthModifier(FS, I, E, LO) && I == E) {
// No more characters left?
@@ -347,6 +365,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 'Z':
if (Target.getTriple().isOSMSVCRT())
k = ConversionSpecifier::ZArg;
+ break;
}
// Check to see if we used the Objective-C modifier flags with
@@ -445,13 +464,8 @@ bool clang::analyze_format_string::ParseFormatStringHasSArg(const char *I,
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
-ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
- bool IsObjCLiteral) const {
- const PrintfConversionSpecifier &CS = getConversionSpecifier();
-
- if (!CS.consumesDataArgument())
- return ArgType::Invalid();
-
+ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
if (CS.getKind() == ConversionSpecifier::cArg)
switch (LM.getKind()) {
case LengthModifier::None:
@@ -611,6 +625,21 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
return ArgType();
}
+
+ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
+ const PrintfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ArgType::Invalid();
+
+ ArgType ScalarTy = getScalarArgType(Ctx, IsObjCLiteral);
+ if (!ScalarTy.isValid() || VectorNumElts.isInvalid())
+ return ScalarTy;
+
+ return ScalarTy.makeVectorType(Ctx, VectorNumElts.getConstantAmount());
+}
+
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
ASTContext &Ctx, bool IsObjCLiteral) {
// %n is different from other conversion specifiers; don't try to fix it.
@@ -660,8 +689,17 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
if (const EnumType *ETy = QT->getAs<EnumType>())
QT = ETy->getDecl()->getIntegerType();
- // We can only work with builtin types.
const BuiltinType *BT = QT->getAs<BuiltinType>();
+ if (!BT) {
+ const VectorType *VT = QT->getAs<VectorType>();
+ if (VT) {
+ QT = VT->getElementType();
+ BT = QT->getAs<BuiltinType>();
+ VectorNumElts = OptionalAmount(VT->getNumElements());
+ }
+ }
+
+ // We can only work with builtin types.
if (!BT)
return false;
@@ -708,6 +746,9 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
@@ -830,6 +871,11 @@ void PrintfSpecifier::toString(raw_ostream &os) const {
FieldWidth.toString(os);
// Precision
Precision.toString(os);
+
+ // Vector modifier
+ if (!VectorNumElts.isInvalid())
+ os << 'v' << VectorNumElts.getConstantAmount();
+
// Length modifier
os << LM.toString();
// Conversion specifier
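
// A minimal standalone sketch of the widened os_log annotation matching above, using
// std::regex instead of llvm::Regex (an assumption made only to keep the snippet
// self-contained): besides "private" and "public", the pattern now also accepts
// "sensitive" and "mask.<type>" inside a comma-delimited %{...} segment.
#include <iostream>
#include <regex>
#include <string>

int main() {
  std::regex Annotation("^[[:space:]]*"
                        "(private|public|sensitive|mask\\.[^[:space:],}]*)"
                        "[[:space:]]*(,|\\})");

  for (std::string Segment : {"private}", " mask.hash ,", "sensitive}"}) {
    std::smatch M;
    if (std::regex_search(Segment, M, Annotation))
      std::cout << "'" << Segment << "' -> annotation '" << M.str(1) << "'\n";
  }
  return 0;
}
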
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 95da9ed6d238..ab873a396419 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -68,8 +68,8 @@ bool mergedCommentIsTrailingComment(StringRef Comment) {
/// column.
bool commentsStartOnSameColumn(const SourceManager &SM, const RawComment &R1,
const RawComment &R2) {
- SourceLocation L1 = R1.getLocStart();
- SourceLocation L2 = R2.getLocStart();
+ SourceLocation L1 = R1.getBeginLoc();
+ SourceLocation L2 = R2.getBeginLoc();
bool Invalid = false;
unsigned C1 = SM.getPresumedColumnNumber(L1, &Invalid);
if (!Invalid) {
@@ -278,8 +278,8 @@ void RawCommentList::addComment(const RawComment &RC,
// Check if the comments are not in source order.
while (!Comments.empty() &&
- !SourceMgr.isBeforeInTranslationUnit(Comments.back()->getLocStart(),
- RC.getLocStart())) {
+ !SourceMgr.isBeforeInTranslationUnit(Comments.back()->getBeginLoc(),
+ RC.getBeginLoc())) {
// If they are, just pop a few last comments that don't fit.
// This happens if an \#include directive contains comments.
Comments.pop_back();
@@ -316,9 +316,9 @@ void RawCommentList::addComment(const RawComment &RC,
(C1.isTrailingComment() && !C2.isTrailingComment() &&
isOrdinaryKind(C2.getKind()) &&
commentsStartOnSameColumn(SourceMgr, C1, C2))) &&
- onlyWhitespaceBetween(SourceMgr, C1.getLocEnd(), C2.getLocStart(),
+ onlyWhitespaceBetween(SourceMgr, C1.getEndLoc(), C2.getBeginLoc(),
/*MaxNewlinesAllowed=*/1)) {
- SourceRange MergedRange(C1.getLocStart(), C2.getLocEnd());
+ SourceRange MergedRange(C1.getBeginLoc(), C2.getEndLoc());
*Comments.back() = RawComment(SourceMgr, MergedRange, CommentOpts, true);
} else {
Comments.push_back(new (Allocator) RawComment(RC));
@@ -415,7 +415,7 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
Str.pop_back();
};
- // Proces first line separately to remember indent for the following lines.
+ // Process first line separately to remember indent for the following lines.
if (!LexLine(/*IsFirstLine=*/true)) {
DropTrailingNewLines(Result);
return Result;
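
// A minimal standalone sketch of the merge condition used above (hypothetical helper,
// not the Clang function): two comments are merged only when nothing but whitespace,
// with at most MaxNewlines line breaks, separates the end of one from the start of
// the next.
#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>

static bool onlyWhitespaceBetween(const std::string &Buf, std::size_t Begin,
                                  std::size_t End, unsigned MaxNewlines) {
  unsigned Newlines = 0;
  for (std::size_t I = Begin; I < End; ++I) {
    if (!std::isspace(static_cast<unsigned char>(Buf[I])))
      return false;
    if (Buf[I] == '\n' && ++Newlines > MaxNewlines)
      return false;
  }
  return true;
}

int main() {
  std::string Buf = "// first\n// second\n\n\n// third\n";
  // "first" and "second" are separated by one newline (offsets 8..9): mergeable.
  std::cout << onlyWhitespaceBetween(Buf, 8, 9, /*MaxNewlines=*/1) << "\n";   // 1
  // "second" and "third" are separated by three newlines (offsets 18..21): not.
  std::cout << onlyWhitespaceBetween(Buf, 18, 21, /*MaxNewlines=*/1) << "\n"; // 0
  return 0;
}
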
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 6f71d5b83e62..62dc22c81403 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -9,6 +9,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
@@ -16,7 +17,6 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
@@ -2829,15 +2829,14 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
CharUnits BaseOffset;
// Respect the external AST source base offset, if present.
- bool FoundBase = false;
if (UseExternalLayout) {
- FoundBase = External.getExternalVBaseOffset(BaseDecl, BaseOffset);
- if (FoundBase)
- assert(BaseOffset >= Size && "base offset already allocated");
- }
- if (!FoundBase)
+ if (!External.getExternalVBaseOffset(BaseDecl, BaseOffset))
+ BaseOffset = Size;
+ } else
BaseOffset = Size.alignTo(Info.Alignment);
+ assert(BaseOffset >= Size && "base offset already allocated");
+
VBases.insert(std::make_pair(BaseDecl,
ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
Size = BaseOffset + BaseLayout.getNonVirtualSize();
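
// A minimal sketch of the alignment step above, using plain integers instead of
// CharUnits: when no external layout provides a virtual-base offset, the base is
// placed at the current size rounded up to the record alignment.
#include <cstdint>
#include <iostream>

static uint64_t alignTo(uint64_t Offset, uint64_t Align) {
  return (Offset + Align - 1) / Align * Align; // round up to a multiple of Align
}

int main() {
  uint64_t Size = 13, Alignment = 8;
  std::cout << "vbase offset = " << alignTo(Size, Alignment) << "\n"; // 16
  return 0;
}
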
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/AST/ScanfFormatString.cpp
index a9af0cdfdacd..08ba7a7a4f5c 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/AST/ScanfFormatString.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/FormatString.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
@@ -264,6 +264,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsWide:
return ArgType::Invalid();
}
+  llvm_unreachable("Unsupported LengthModifier Type");
// Unsigned int.
case ConversionSpecifier::oArg:
@@ -303,6 +304,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsWide:
return ArgType::Invalid();
}
+  llvm_unreachable("Unsupported LengthModifier Type");
// Float.
case ConversionSpecifier::aArg:
diff --git a/lib/AST/SelectorLocationsKind.cpp b/lib/AST/SelectorLocationsKind.cpp
index 671207a7f2d9..8b72c85d7ef7 100644
--- a/lib/AST/SelectorLocationsKind.cpp
+++ b/lib/AST/SelectorLocationsKind.cpp
@@ -49,12 +49,12 @@ SourceLocation getArgLoc(T* Arg);
template <>
SourceLocation getArgLoc<Expr>(Expr *Arg) {
- return Arg->getLocStart();
+ return Arg->getBeginLoc();
}
template <>
SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) {
- SourceLocation Loc = Arg->getLocStart();
+ SourceLocation Loc = Arg->getBeginLoc();
if (Loc.isInvalid())
return Loc;
// -1 to point to left paren of the method parameter's type.
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index a041006c905e..116291bfa1ef 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -76,6 +76,14 @@ const char *Stmt::getStmtClassName() const {
return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
}
+// Check that no statement / expression class is polymorphic. LLVM style RTTI
+// should be used instead. If absolutely needed, an exception can still be added
+// here by defining the appropriate macro (but please don't do this).
+#define STMT(CLASS, PARENT) \
+ static_assert(!std::is_polymorphic<CLASS>::value, \
+ #CLASS " should not be polymorphic!");
+#include "clang/AST/StmtNodes.inc"
+
void Stmt::PrintStats() {
// Ensure the table is primed.
getStmtInfoTableEntry(Stmt::NullStmtClass);
@@ -113,17 +121,23 @@ void Stmt::EnableStatistics() {
Stmt *Stmt::IgnoreImplicit() {
Stmt *s = this;
- if (auto *ewc = dyn_cast<ExprWithCleanups>(s))
- s = ewc->getSubExpr();
+ Stmt *lasts = nullptr;
- if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
- s = mte->GetTemporaryExpr();
+ while (s != lasts) {
+ lasts = s;
- if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s))
- s = bte->getSubExpr();
+ if (auto *fe = dyn_cast<FullExpr>(s))
+ s = fe->getSubExpr();
- while (auto *ice = dyn_cast<ImplicitCastExpr>(s))
- s = ice->getSubExpr();
+ if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s))
+ s = mte->GetTemporaryExpr();
+
+ if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s))
+ s = bte->getSubExpr();
+
+ if (auto *ice = dyn_cast<ImplicitCastExpr>(s))
+ s = ice->getSubExpr();
+ }
return s;
}
@@ -185,30 +199,26 @@ namespace {
return bad();
}
- typedef SourceLocation getLocStart_t() const;
- template <class T> good implements_getLocStart(getLocStart_t T::*) {
+ typedef SourceLocation getBeginLoc_t() const;
+ template <class T> good implements_getBeginLoc(getBeginLoc_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
- static bad implements_getLocStart(getLocStart_t Stmt::*) {
- return bad();
- }
+ static bad implements_getBeginLoc(getBeginLoc_t Stmt::*) { return bad(); }
typedef SourceLocation getLocEnd_t() const;
- template <class T> good implements_getLocEnd(getLocEnd_t T::*) {
+ template <class T> good implements_getEndLoc(getLocEnd_t T::*) {
return good();
}
LLVM_ATTRIBUTE_UNUSED
- static bad implements_getLocEnd(getLocEnd_t Stmt::*) {
- return bad();
- }
+ static bad implements_getEndLoc(getLocEnd_t Stmt::*) { return bad(); }
#define ASSERT_IMPLEMENTS_children(type) \
(void) is_good(implements_children(&type::children))
-#define ASSERT_IMPLEMENTS_getLocStart(type) \
- (void) is_good(implements_getLocStart(&type::getLocStart))
-#define ASSERT_IMPLEMENTS_getLocEnd(type) \
- (void) is_good(implements_getLocEnd(&type::getLocEnd))
+#define ASSERT_IMPLEMENTS_getBeginLoc(type) \
+ (void)is_good(implements_getBeginLoc(&type::getBeginLoc))
+#define ASSERT_IMPLEMENTS_getEndLoc(type) \
+ (void)is_good(implements_getEndLoc(&type::getEndLoc))
} // namespace
@@ -217,10 +227,10 @@ namespace {
LLVM_ATTRIBUTE_UNUSED
static inline void check_implementations() {
#define ABSTRACT_STMT(type)
-#define STMT(type, base) \
- ASSERT_IMPLEMENTS_children(type); \
- ASSERT_IMPLEMENTS_getLocStart(type); \
- ASSERT_IMPLEMENTS_getLocEnd(type);
+#define STMT(type, base) \
+ ASSERT_IMPLEMENTS_children(type); \
+ ASSERT_IMPLEMENTS_getBeginLoc(type); \
+ ASSERT_IMPLEMENTS_getEndLoc(type);
#include "clang/AST/StmtNodes.inc"
}
@@ -257,8 +267,8 @@ namespace {
template <class S>
SourceRange getSourceRangeImpl(const Stmt *stmt,
SourceRange (Stmt::*v)() const) {
- return SourceRange(static_cast<const S*>(stmt)->getLocStart(),
- static_cast<const S*>(stmt)->getLocEnd());
+ return SourceRange(static_cast<const S *>(stmt)->getBeginLoc(),
+ static_cast<const S *>(stmt)->getEndLoc());
}
} // namespace
@@ -275,36 +285,41 @@ SourceRange Stmt::getSourceRange() const {
llvm_unreachable("unknown statement kind!");
}
-SourceLocation Stmt::getLocStart() const {
-// llvm::errs() << "getLocStart() for " << getStmtClassName() << "\n";
+SourceLocation Stmt::getBeginLoc() const {
+ // llvm::errs() << "getBeginLoc() for " << getStmtClassName() << "\n";
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
-#define STMT(type, base) \
- case Stmt::type##Class: \
- return static_cast<const type*>(this)->getLocStart();
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type *>(this)->getBeginLoc();
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind");
}
-SourceLocation Stmt::getLocEnd() const {
+SourceLocation Stmt::getEndLoc() const {
switch (getStmtClass()) {
case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
-#define STMT(type, base) \
- case Stmt::type##Class: \
- return static_cast<const type*>(this)->getLocEnd();
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type *>(this)->getEndLoc();
#include "clang/AST/StmtNodes.inc"
}
llvm_unreachable("unknown statement kind");
}
+int64_t Stmt::getID(const ASTContext &Context) const {
+ return Context.getAllocator().identifyKnownAlignedObject<Stmt>(this);
+}
+
CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB,
SourceLocation RB)
- : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
+ : Stmt(CompoundStmtClass), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
setStmts(Stmts);
+ CompoundStmtBits.LBraceLoc = LB;
}
void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
@@ -789,51 +804,99 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
});
}
-IfStmt::IfStmt(const ASTContext &C, SourceLocation IL, bool IsConstexpr,
- Stmt *init, VarDecl *var, Expr *cond, Stmt *then,
- SourceLocation EL, Stmt *elsev)
- : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL) {
+IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
+ Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then,
+ SourceLocation EL, Stmt *Else)
+ : Stmt(IfStmtClass) {
+ bool HasElse = Else != nullptr;
+ bool HasVar = Var != nullptr;
+ bool HasInit = Init != nullptr;
+ IfStmtBits.HasElse = HasElse;
+ IfStmtBits.HasVar = HasVar;
+ IfStmtBits.HasInit = HasInit;
+
setConstexpr(IsConstexpr);
- setConditionVariable(C, var);
- SubExprs[INIT] = init;
- SubExprs[COND] = cond;
- SubExprs[THEN] = then;
- SubExprs[ELSE] = elsev;
-}
-VarDecl *IfStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
+ setCond(Cond);
+ setThen(Then);
+ if (HasElse)
+ setElse(Else);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+ if (HasInit)
+ setInit(Init);
+
+ setIfLoc(IL);
+ if (HasElse)
+ setElseLoc(EL);
+}
+
+IfStmt::IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit)
+ : Stmt(IfStmtClass, Empty) {
+ IfStmtBits.HasElse = HasElse;
+ IfStmtBits.HasVar = HasVar;
+ IfStmtBits.HasInit = HasInit;
+}
+
+IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
+ bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
+ Stmt *Then, SourceLocation EL, Stmt *Else) {
+ bool HasElse = Else != nullptr;
+ bool HasVar = Var != nullptr;
+ bool HasInit = Init != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
+ alignof(IfStmt));
+ return new (Mem)
+ IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, Then, EL, Else);
+}
+
+IfStmt *IfStmt::CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
+ bool HasInit) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
+ alignof(IfStmt));
+ return new (Mem) IfStmt(EmptyShell(), HasElse, HasVar, HasInit);
+}
+
+VarDecl *IfStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
return nullptr;
-
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
return cast<VarDecl>(DS->getSingleDecl());
}
-void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void IfStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This if statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
bool IfStmt::isObjCAvailabilityCheck() const {
- return isa<ObjCAvailabilityCheckExpr>(SubExprs[COND]);
+ return isa<ObjCAvailabilityCheckExpr>(getCond());
}
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
- : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+ : Stmt(ForStmtClass), LParenLoc(LP), RParenLoc(RP)
{
SubExprs[INIT] = Init;
setConditionVariable(C, condVar);
SubExprs[COND] = Cond;
SubExprs[INC] = Inc;
SubExprs[BODY] = Body;
+ ForStmtBits.ForLoc = FL;
}
VarDecl *ForStmt::getConditionVariable() const {
@@ -855,66 +918,125 @@ void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
VarRange.getEnd());
}
-SwitchStmt::SwitchStmt(const ASTContext &C, Stmt *init, VarDecl *Var,
- Expr *cond)
- : Stmt(SwitchStmtClass), FirstCase(nullptr, false) {
- setConditionVariable(C, Var);
- SubExprs[INIT] = init;
- SubExprs[COND] = cond;
- SubExprs[BODY] = nullptr;
+SwitchStmt::SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
+ Expr *Cond)
+ : Stmt(SwitchStmtClass), FirstCase(nullptr) {
+ bool HasInit = Init != nullptr;
+ bool HasVar = Var != nullptr;
+ SwitchStmtBits.HasInit = HasInit;
+ SwitchStmtBits.HasVar = HasVar;
+ SwitchStmtBits.AllEnumCasesCovered = false;
+
+ setCond(Cond);
+ setBody(nullptr);
+ if (HasInit)
+ setInit(Init);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+
+ setSwitchLoc(SourceLocation{});
}
-VarDecl *SwitchStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
- return nullptr;
+SwitchStmt::SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar)
+ : Stmt(SwitchStmtClass, Empty) {
+ SwitchStmtBits.HasInit = HasInit;
+ SwitchStmtBits.HasVar = HasVar;
+ SwitchStmtBits.AllEnumCasesCovered = false;
+}
+
+SwitchStmt *SwitchStmt::Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
+ Expr *Cond) {
+ bool HasInit = Init != nullptr;
+ bool HasVar = Var != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasInit + HasVar),
+ alignof(SwitchStmt));
+ return new (Mem) SwitchStmt(Ctx, Init, Var, Cond);
+}
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
+SwitchStmt *SwitchStmt::CreateEmpty(const ASTContext &Ctx, bool HasInit,
+ bool HasVar) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasInit + HasVar),
+ alignof(SwitchStmt));
+ return new (Mem) SwitchStmt(EmptyShell(), HasInit, HasVar);
+}
+
+VarDecl *SwitchStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
+ return nullptr;
return cast<VarDecl>(DS->getSingleDecl());
}
-void SwitchStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void SwitchStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This switch statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
-Stmt *SwitchCase::getSubStmt() {
- if (isa<CaseStmt>(this))
- return cast<CaseStmt>(this)->getSubStmt();
- return cast<DefaultStmt>(this)->getSubStmt();
+WhileStmt::WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
+ Stmt *Body, SourceLocation WL)
+ : Stmt(WhileStmtClass) {
+ bool HasVar = Var != nullptr;
+ WhileStmtBits.HasVar = HasVar;
+
+ setCond(Cond);
+ setBody(Body);
+ if (HasVar)
+ setConditionVariable(Ctx, Var);
+
+ setWhileLoc(WL);
}
-WhileStmt::WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
- SourceLocation WL)
- : Stmt(WhileStmtClass) {
- setConditionVariable(C, Var);
- SubExprs[COND] = cond;
- SubExprs[BODY] = body;
- WhileLoc = WL;
+WhileStmt::WhileStmt(EmptyShell Empty, bool HasVar)
+ : Stmt(WhileStmtClass, Empty) {
+ WhileStmtBits.HasVar = HasVar;
}
-VarDecl *WhileStmt::getConditionVariable() const {
- if (!SubExprs[VAR])
- return nullptr;
+WhileStmt *WhileStmt::Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
+ Stmt *Body, SourceLocation WL) {
+ bool HasVar = Var != nullptr;
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
+ alignof(WhileStmt));
+ return new (Mem) WhileStmt(Ctx, Var, Cond, Body, WL);
+}
+
+WhileStmt *WhileStmt::CreateEmpty(const ASTContext &Ctx, bool HasVar) {
+ void *Mem =
+ Ctx.Allocate(totalSizeToAlloc<Stmt *>(NumMandatoryStmtPtr + HasVar),
+ alignof(WhileStmt));
+ return new (Mem) WhileStmt(EmptyShell(), HasVar);
+}
- auto *DS = cast<DeclStmt>(SubExprs[VAR]);
+VarDecl *WhileStmt::getConditionVariable() {
+ auto *DS = getConditionVariableDeclStmt();
+ if (!DS)
+ return nullptr;
return cast<VarDecl>(DS->getSingleDecl());
}
-void WhileStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
+void WhileStmt::setConditionVariable(const ASTContext &Ctx, VarDecl *V) {
+ assert(hasVarStorage() &&
+ "This while statement has no storage for a condition variable!");
+
if (!V) {
- SubExprs[VAR] = nullptr;
+ getTrailingObjects<Stmt *>()[varOffset()] = nullptr;
return;
}
SourceRange VarRange = V->getSourceRange();
- SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
- VarRange.getEnd());
+ getTrailingObjects<Stmt *>()[varOffset()] = new (Ctx)
+ DeclStmt(DeclGroupRef(V), VarRange.getBegin(), VarRange.getEnd());
}
// IndirectGotoStmt
@@ -925,11 +1047,54 @@ LabelDecl *IndirectGotoStmt::getConstantTarget() {
}
// ReturnStmt
-const Expr* ReturnStmt::getRetValue() const {
- return cast_or_null<Expr>(RetExpr);
-}
-Expr* ReturnStmt::getRetValue() {
- return cast_or_null<Expr>(RetExpr);
+ReturnStmt::ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
+ : Stmt(ReturnStmtClass), RetExpr(E) {
+ bool HasNRVOCandidate = NRVOCandidate != nullptr;
+ ReturnStmtBits.HasNRVOCandidate = HasNRVOCandidate;
+ if (HasNRVOCandidate)
+ setNRVOCandidate(NRVOCandidate);
+ setReturnLoc(RL);
+}
+
+ReturnStmt::ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate)
+ : Stmt(ReturnStmtClass, Empty) {
+ ReturnStmtBits.HasNRVOCandidate = HasNRVOCandidate;
+}
+
+ReturnStmt *ReturnStmt::Create(const ASTContext &Ctx, SourceLocation RL,
+ Expr *E, const VarDecl *NRVOCandidate) {
+ bool HasNRVOCandidate = NRVOCandidate != nullptr;
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<const VarDecl *>(HasNRVOCandidate),
+ alignof(ReturnStmt));
+ return new (Mem) ReturnStmt(RL, E, NRVOCandidate);
+}
+
+ReturnStmt *ReturnStmt::CreateEmpty(const ASTContext &Ctx,
+ bool HasNRVOCandidate) {
+ void *Mem = Ctx.Allocate(totalSizeToAlloc<const VarDecl *>(HasNRVOCandidate),
+ alignof(ReturnStmt));
+ return new (Mem) ReturnStmt(EmptyShell(), HasNRVOCandidate);
+}
+
+// CaseStmt
+CaseStmt *CaseStmt::Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
+ SourceLocation caseLoc, SourceLocation ellipsisLoc,
+ SourceLocation colonLoc) {
+ bool CaseStmtIsGNURange = rhs != nullptr;
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + CaseStmtIsGNURange, CaseStmtIsGNURange),
+ alignof(CaseStmt));
+ return new (Mem) CaseStmt(lhs, rhs, caseLoc, ellipsisLoc, colonLoc);
+}
+
+CaseStmt *CaseStmt::CreateEmpty(const ASTContext &Ctx,
+ bool CaseStmtIsGNURange) {
+ void *Mem = Ctx.Allocate(
+ totalSizeToAlloc<Stmt *, SourceLocation>(
+ NumMandatoryStmtPtr + CaseStmtIsGNURange, CaseStmtIsGNURange),
+ alignof(CaseStmt));
+ return new (Mem) CaseStmt(EmptyShell(), CaseStmtIsGNURange);
}
SEHTryStmt::SEHTryStmt(bool IsCXXTry, SourceLocation TryLoc, Stmt *TryBlock,
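
// A minimal standalone sketch (hypothetical Node type, not the Clang classes or
// llvm::TrailingObjects) of the tail-allocation pattern used by the IfStmt /
// SwitchStmt / WhileStmt / ReturnStmt factories above: optional children are not
// fixed members; Create() computes how many trailing slots are needed and allocates
// the node and its slots in a single block.
#include <cstddef>
#include <iostream>
#include <new>

struct alignas(alignof(void *)) Node {
  bool HasElse;

  // Child slots live immediately after the object: [Cond, Then, (Else if present)].
  void **slots() { return reinterpret_cast<void **>(this + 1); }

  static Node *Create(void *Cond, void *Then, void *Else) {
    bool HasElse = Else != nullptr;
    std::size_t Bytes = sizeof(Node) + (2 + HasElse) * sizeof(void *);
    void *Mem = ::operator new(Bytes);
    Node *N = new (Mem) Node{HasElse};
    N->slots()[0] = Cond;
    N->slots()[1] = Then;
    if (HasElse)
      N->slots()[2] = Else;
    return N;
  }
};

int main() {
  int Cond = 0, Then = 1;
  Node *N = Node::Create(&Cond, &Then, /*Else=*/nullptr);
  std::cout << "HasElse = " << N->HasElse << ", bytes allocated = "
            << sizeof(Node) + 2 * sizeof(void *) << "\n";
  ::operator delete(N);
  return 0;
}
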
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
index bf2d6a16fb5f..12367f8fd54b 100644
--- a/lib/AST/StmtCXX.cpp
+++ b/lib/AST/StmtCXX.cpp
@@ -45,7 +45,7 @@ CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
std::copy(handlers.begin(), handlers.end(), Stmts + 1);
}
-CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range,
+CXXForRangeStmt::CXXForRangeStmt(Stmt *Init, DeclStmt *Range,
DeclStmt *BeginStmt, DeclStmt *EndStmt,
Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
Stmt *Body, SourceLocation FL,
@@ -53,6 +53,7 @@ CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range,
SourceLocation RPL)
: Stmt(CXXForRangeStmtClass), ForLoc(FL), CoawaitLoc(CAL), ColonLoc(CL),
RParenLoc(RPL) {
+ SubExprs[INIT] = Init;
SubExprs[RANGE] = Range;
SubExprs[BEGINSTMT] = BeginStmt;
SubExprs[ENDSTMT] = EndStmt;
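
// The new Init sub-statement above models the C++20 range-based for with an
// init-statement; a minimal example of the source construct (requires -std=c++20):
#include <iostream>
#include <vector>

int main() {
  for (std::vector<int> v{1, 2, 3}; int x : v)
    std::cout << x << ' ';
  std::cout << '\n';
  return 0;
}
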
diff --git a/lib/AST/StmtObjC.cpp b/lib/AST/StmtObjC.cpp
index eea03f64c2fe..ed21e2d0d2b6 100644
--- a/lib/AST/StmtObjC.cpp
+++ b/lib/AST/StmtObjC.cpp
@@ -64,10 +64,10 @@ ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
-SourceLocation ObjCAtTryStmt::getLocEnd() const {
+SourceLocation ObjCAtTryStmt::getEndLoc() const {
if (HasFinally)
- return getFinallyStmt()->getLocEnd();
+ return getFinallyStmt()->getEndLoc();
if (NumCatchStmts)
- return getCatchStmt(NumCatchStmts - 1)->getLocEnd();
- return getTryBody()->getLocEnd();
+ return getCatchStmt(NumCatchStmts - 1)->getEndLoc();
+ return getTryBody()->getEndLoc();
}
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index 1258af7a2d37..85a2daa0801a 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -1079,6 +1079,8 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1145,6 +1147,8 @@ OMPDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
@@ -1457,6 +1461,8 @@ OMPTeamsDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
@@ -1524,6 +1530,8 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1670,6 +1678,8 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
Dir->HasCancel = HasCancel;
return Dir;
}
@@ -1741,6 +1751,8 @@ OMPTargetTeamsDistributeParallelForSimdDirective::Create(
Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
return Dir;
}
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index cbf26c036058..ae726e387107 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -69,14 +69,16 @@ namespace {
unsigned IndentLevel;
PrinterHelper* Helper;
PrintingPolicy Policy;
+ std::string NL;
const ASTContext *Context;
public:
StmtPrinter(raw_ostream &os, PrinterHelper *helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
+ StringRef NL = "\n",
const ASTContext *Context = nullptr)
: OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy),
- Context(Context) {}
+ NL(NL), Context(Context) {}
void PrintStmt(Stmt *S) {
PrintStmt(S, Policy.Indentation);
@@ -88,15 +90,37 @@ namespace {
// If this is an expr used in a stmt context, indent and newline it.
Indent();
Visit(S);
- OS << ";\n";
+ OS << ";" << NL;
} else if (S) {
Visit(S);
} else {
- Indent() << "<<<NULL STATEMENT>>>\n";
+ Indent() << "<<<NULL STATEMENT>>>" << NL;
}
IndentLevel -= SubIndent;
}
+ void PrintInitStmt(Stmt *S, unsigned PrefixWidth) {
+ // FIXME: Cope better with odd prefix widths.
+ IndentLevel += (PrefixWidth + 1) / 2;
+ if (auto *DS = dyn_cast<DeclStmt>(S))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(S));
+ OS << "; ";
+ IndentLevel -= (PrefixWidth + 1) / 2;
+ }
+
+ void PrintControlledStmt(Stmt *S) {
+ if (auto *CS = dyn_cast<CompoundStmt>(S)) {
+ OS << " ";
+ PrintRawCompoundStmt(CS);
+ OS << NL;
+ } else {
+ OS << NL;
+ PrintStmt(S);
+ }
+ }
+
void PrintRawCompoundStmt(CompoundStmt *S);
void PrintRawDecl(Decl *D);
void PrintRawDeclStmt(const DeclStmt *S);
@@ -128,7 +152,7 @@ namespace {
}
void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
- Indent() << "<<unknown stmt type>>\n";
+ Indent() << "<<unknown stmt type>>" << NL;
}
void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
@@ -152,7 +176,7 @@ namespace {
/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
/// with no newline after the }.
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
- OS << "{\n";
+ OS << "{" << NL;
for (auto *I : Node->body())
PrintStmt(I);
@@ -169,19 +193,19 @@ void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
}
void StmtPrinter::VisitNullStmt(NullStmt *Node) {
- Indent() << ";\n";
+ Indent() << ";" << NL;
}
void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
Indent();
PrintRawDeclStmt(Node);
- OS << ";\n";
+ OS << ";" << NL;
}
void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
Indent();
PrintRawCompoundStmt(Node);
- OS << "\n";
+  OS << NL;
}
void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
@@ -191,18 +215,18 @@ void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
OS << " ... ";
PrintExpr(Node->getRHS());
}
- OS << ":\n";
+ OS << ":" << NL;
PrintStmt(Node->getSubStmt(), 0);
}
void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
- Indent(-1) << "default:\n";
+ Indent(-1) << "default:" << NL;
PrintStmt(Node->getSubStmt(), 0);
}
void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
- Indent(-1) << Node->getName() << ":\n";
+ Indent(-1) << Node->getName() << ":" << NL;
PrintStmt(Node->getSubStmt(), 0);
}
@@ -216,6 +240,8 @@ void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
OS << "if (";
+ if (If->getInit())
+ PrintInitStmt(If->getInit(), 4);
if (const DeclStmt *DS = If->getConditionVariableDeclStmt())
PrintRawDeclStmt(DS);
else
@@ -225,9 +251,9 @@ void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
if (auto *CS = dyn_cast<CompoundStmt>(If->getThen())) {
OS << ' ';
PrintRawCompoundStmt(CS);
- OS << (If->getElse() ? ' ' : '\n');
+ OS << (If->getElse() ? " " : NL);
} else {
- OS << '\n';
+ OS << NL;
PrintStmt(If->getThen());
if (If->getElse()) Indent();
}
@@ -238,12 +264,12 @@ void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
if (auto *CS = dyn_cast<CompoundStmt>(Else)) {
OS << ' ';
PrintRawCompoundStmt(CS);
- OS << '\n';
+ OS << NL;
} else if (auto *ElseIf = dyn_cast<IfStmt>(Else)) {
OS << ' ';
PrintRawIfStmt(ElseIf);
} else {
- OS << '\n';
+ OS << NL;
PrintStmt(If->getElse());
}
}
@@ -256,21 +282,14 @@ void StmtPrinter::VisitIfStmt(IfStmt *If) {
void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
Indent() << "switch (";
+ if (Node->getInit())
+ PrintInitStmt(Node->getInit(), 8);
if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
PrintRawDeclStmt(DS);
else
PrintExpr(Node->getCond());
OS << ")";
-
- // Pretty print compoundstmt bodies (very common).
- if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
- OS << " ";
- PrintRawCompoundStmt(CS);
- OS << "\n";
- } else {
- OS << "\n";
- PrintStmt(Node->getBody());
- }
+ PrintControlledStmt(Node->getBody());
}
void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
@@ -279,7 +298,7 @@ void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
PrintRawDeclStmt(DS);
else
PrintExpr(Node->getCond());
- OS << ")\n";
+ OS << ")" << NL;
PrintStmt(Node->getBody());
}
@@ -289,43 +308,31 @@ void StmtPrinter::VisitDoStmt(DoStmt *Node) {
PrintRawCompoundStmt(CS);
OS << " ";
} else {
- OS << "\n";
+ OS << NL;
PrintStmt(Node->getBody());
Indent();
}
OS << "while (";
PrintExpr(Node->getCond());
- OS << ");\n";
+ OS << ");" << NL;
}
void StmtPrinter::VisitForStmt(ForStmt *Node) {
Indent() << "for (";
- if (Node->getInit()) {
- if (auto *DS = dyn_cast<DeclStmt>(Node->getInit()))
- PrintRawDeclStmt(DS);
- else
- PrintExpr(cast<Expr>(Node->getInit()));
- }
- OS << ";";
- if (Node->getCond()) {
- OS << " ";
+ if (Node->getInit())
+ PrintInitStmt(Node->getInit(), 5);
+ else
+ OS << (Node->getCond() ? "; " : ";");
+ if (Node->getCond())
PrintExpr(Node->getCond());
- }
OS << ";";
if (Node->getInc()) {
OS << " ";
PrintExpr(Node->getInc());
}
- OS << ") ";
-
- if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
- PrintRawCompoundStmt(CS);
- OS << "\n";
- } else {
- OS << "\n";
- PrintStmt(Node->getBody());
- }
+ OS << ")";
+ PrintControlledStmt(Node->getBody());
}
void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
@@ -336,28 +343,21 @@ void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
PrintExpr(cast<Expr>(Node->getElement()));
OS << " in ";
PrintExpr(Node->getCollection());
- OS << ") ";
-
- if (auto *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
- PrintRawCompoundStmt(CS);
- OS << "\n";
- } else {
- OS << "\n";
- PrintStmt(Node->getBody());
- }
+ OS << ")";
+ PrintControlledStmt(Node->getBody());
}
void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
Indent() << "for (";
+ if (Node->getInit())
+ PrintInitStmt(Node->getInit(), 5);
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressInitializers = true;
Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel);
OS << " : ";
PrintExpr(Node->getRangeInit());
- OS << ") {\n";
- PrintStmt(Node->getBody());
- Indent() << "}";
- if (Policy.IncludeNewlines) OS << "\n";
+ OS << ")";
+ PrintControlledStmt(Node->getBody());
}
void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
@@ -378,24 +378,24 @@ void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
Indent() << "goto " << Node->getLabel()->getName() << ";";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
Indent() << "goto *";
PrintExpr(Node->getTarget());
OS << ";";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
Indent() << "continue;";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
Indent() << "break;";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
@@ -405,7 +405,7 @@ void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
PrintExpr(Node->getRetValue());
}
OS << ";";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
@@ -470,17 +470,17 @@ void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
}
OS << ");";
- if (Policy.IncludeNewlines) OS << "\n";
+ if (Policy.IncludeNewlines) OS << NL;
}
void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
// FIXME: Implement MS style inline asm statement printer.
Indent() << "__asm ";
if (Node->hasBraces())
- OS << "{\n";
- OS << Node->getAsmString() << "\n";
+ OS << "{" << NL;
+ OS << Node->getAsmString() << NL;
if (Node->hasBraces())
- Indent() << "}\n";
+ Indent() << "}" << NL;
}
void StmtPrinter::VisitCapturedStmt(CapturedStmt *Node) {
@@ -491,7 +491,7 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
Indent() << "@try";
if (auto *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
PrintRawCompoundStmt(TS);
- OS << "\n";
+ OS << NL;
}
for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
@@ -504,14 +504,14 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
OS << ")";
if (auto *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
PrintRawCompoundStmt(CS);
- OS << "\n";
+ OS << NL;
}
}
if (auto *FS = static_cast<ObjCAtFinallyStmt *>(Node->getFinallyStmt())) {
Indent() << "@finally";
PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
- OS << "\n";
+ OS << NL;
}
}
@@ -519,7 +519,7 @@ void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
}
void StmtPrinter::VisitObjCAtCatchStmt (ObjCAtCatchStmt *Node) {
- Indent() << "@catch (...) { /* todo */ } \n";
+ Indent() << "@catch (...) { /* todo */ } " << NL;
}
void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
@@ -528,7 +528,7 @@ void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
OS << " ";
PrintExpr(Node->getThrowExpr());
}
- OS << ";\n";
+ OS << ";" << NL;
}
void StmtPrinter::VisitObjCAvailabilityCheckExpr(
@@ -541,13 +541,13 @@ void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
PrintExpr(Node->getSynchExpr());
OS << ")";
PrintRawCompoundStmt(Node->getSynchBody());
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
Indent() << "@autoreleasepool";
PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
@@ -563,7 +563,7 @@ void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
Indent();
PrintRawCXXCatchStmt(Node);
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
@@ -573,7 +573,7 @@ void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
OS << " ";
PrintRawCXXCatchStmt(Node->getHandler(i));
}
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
@@ -587,471 +587,38 @@ void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
assert(F && "Must have a finally block...");
PrintRawSEHFinallyStmt(F);
}
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) {
OS << "__finally ";
PrintRawCompoundStmt(Node->getBlock());
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) {
OS << "__except (";
VisitExpr(Node->getFilterExpr());
- OS << ")\n";
+ OS << ")" << NL;
PrintRawCompoundStmt(Node->getBlock());
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) {
Indent();
PrintRawSEHExceptHandler(Node);
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
Indent();
PrintRawSEHFinallyStmt(Node);
- OS << "\n";
+ OS << NL;
}
void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
Indent() << "__leave;";
- if (Policy.IncludeNewlines) OS << "\n";
-}
-
-//===----------------------------------------------------------------------===//
-// OpenMP clauses printing methods
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
- raw_ostream &OS;
- const PrintingPolicy &Policy;
-
- /// Process clauses with list of variables.
- template <typename T>
- void VisitOMPClauseList(T *Node, char StartSym);
-
-public:
- OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
- : OS(OS), Policy(Policy) {}
-
-#define OPENMP_CLAUSE(Name, Class) \
- void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
-};
-
-} // namespace
-
-void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
- OS << "if(";
- if (Node->getNameModifier() != OMPD_unknown)
- OS << getOpenMPDirectiveName(Node->getNameModifier()) << ": ";
- Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
- OS << "final(";
- Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
- OS << "num_threads(";
- Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
- OS << "safelen(";
- Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
- OS << "simdlen(";
- Node->getSimdlen()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
- OS << "collapse(";
- Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
- OS << "default("
- << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
- << ")";
-}
-
-void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
- OS << "proc_bind("
- << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
- << ")";
-}
-
-void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
- OS << "schedule(";
- if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
- Node->getFirstScheduleModifier());
- if (Node->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
- OS << ", ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule,
- Node->getSecondScheduleModifier());
- }
- OS << ": ";
- }
- OS << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
- if (auto *E = Node->getChunkSize()) {
- OS << ", ";
- E->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
- OS << "ordered";
- if (auto *Num = Node->getNumForLoops()) {
- OS << "(";
- Num->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
- OS << "nowait";
-}
-
-void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
- OS << "untied";
-}
-
-void OMPClausePrinter::VisitOMPNogroupClause(OMPNogroupClause *) {
- OS << "nogroup";
-}
-
-void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
- OS << "mergeable";
-}
-
-void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }
-
-void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }
-
-void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
- OS << "update";
-}
-
-void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
- OS << "capture";
-}
-
-void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
- OS << "seq_cst";
-}
-
-void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) {
- OS << "threads";
-}
-
-void OMPClausePrinter::VisitOMPSIMDClause(OMPSIMDClause *) { OS << "simd"; }
-
-void OMPClausePrinter::VisitOMPDeviceClause(OMPDeviceClause *Node) {
- OS << "device(";
- Node->getDevice()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumTeamsClause(OMPNumTeamsClause *Node) {
- OS << "num_teams(";
- Node->getNumTeams()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPThreadLimitClause(OMPThreadLimitClause *Node) {
- OS << "thread_limit(";
- Node->getThreadLimit()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
- OS << "priority(";
- Node->getPriority()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
- OS << "grainsize(";
- Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
- OS << "num_tasks(";
- Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
- OS << "hint(";
- Node->getHint()->printPretty(OS, nullptr, Policy, 0);
- OS << ")";
-}
-
-template<typename T>
-void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
- for (typename T::varlist_iterator I = Node->varlist_begin(),
- E = Node->varlist_end();
- I != E; ++I) {
- assert(*I && "Expected non-null Stmt");
- OS << (I == Node->varlist_begin() ? StartSym : ',');
- if (auto *DRE = dyn_cast<DeclRefExpr>(*I)) {
- if (isa<OMPCapturedExprDecl>(DRE->getDecl()))
- DRE->printPretty(OS, nullptr, Policy, 0);
- else
- DRE->getDecl()->printQualifiedName(OS);
- } else
- (*I)->printPretty(OS, nullptr, Policy, 0);
- }
-}
-
-void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "private";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "firstprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "lastprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "shared";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPTaskReductionClause(
- OMPTaskReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "task_reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "in_reduction(";
- NestedNameSpecifier *QualifierLoc =
- Node->getQualifierLoc().getNestedNameSpecifier();
- OverloadedOperatorKind OOK =
- Node->getNameInfo().getName().getCXXOverloadedOperator();
- if (QualifierLoc == nullptr && OOK != OO_None) {
- // Print reduction identifier in C format
- OS << getOperatorSpelling(OOK);
- } else {
- // Use C++ format
- if (QualifierLoc != nullptr)
- QualifierLoc->print(OS, Policy);
- OS << Node->getNameInfo();
- }
- OS << ":";
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "linear";
- if (Node->getModifierLoc().isValid()) {
- OS << '('
- << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
- }
- VisitOMPClauseList(Node, '(');
- if (Node->getModifierLoc().isValid())
- OS << ')';
- if (Node->getStep() != nullptr) {
- OS << ": ";
- Node->getStep()->printPretty(OS, nullptr, Policy, 0);
- }
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "aligned";
- VisitOMPClauseList(Node, '(');
- if (Node->getAlignment() != nullptr) {
- OS << ": ";
- Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
- }
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "copyin";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "copyprivate";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
- if (!Node->varlist_empty()) {
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
- OS << "depend(";
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getDependencyKind());
- if (!Node->varlist_empty()) {
- OS << " :";
- VisitOMPClauseList(Node, ' ');
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "map(";
- if (Node->getMapType() != OMPC_MAP_unknown) {
- if (Node->getMapTypeModifier() != OMPC_MAP_unknown) {
- OS << getOpenMPSimpleClauseTypeName(OMPC_map,
- Node->getMapTypeModifier());
- OS << ',';
- }
- OS << getOpenMPSimpleClauseTypeName(OMPC_map, Node->getMapType());
- OS << ':';
- }
- VisitOMPClauseList(Node, ' ');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPToClause(OMPToClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "to";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPFromClause(OMPFromClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "from";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPDistScheduleClause(OMPDistScheduleClause *Node) {
- OS << "dist_schedule(" << getOpenMPSimpleClauseTypeName(
- OMPC_dist_schedule, Node->getDistScheduleKind());
- if (auto *E = Node->getChunkSize()) {
- OS << ", ";
- E->printPretty(OS, nullptr, Policy);
- }
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPDefaultmapClause(OMPDefaultmapClause *Node) {
- OS << "defaultmap(";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapModifier());
- OS << ": ";
- OS << getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- Node->getDefaultmapKind());
- OS << ")";
-}
-
-void OMPClausePrinter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "use_device_ptr";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
-}
-
-void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
- if (!Node->varlist_empty()) {
- OS << "is_device_ptr";
- VisitOMPClauseList(Node, '(');
- OS << ")";
- }
+ if (Policy.IncludeNewlines) OS << NL;
}
//===----------------------------------------------------------------------===//
@@ -1067,7 +634,7 @@ void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S,
OS << ' ';
Printer.Visit(Clause);
}
- OS << "\n";
+ OS << NL;
if (!ForceNoStmt && S->hasAssociatedStmt())
PrintStmt(S->getInnermostCapturedStmt()->getCapturedStmt());
}
@@ -1339,6 +906,10 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
// Expr printing methods.
//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitConstantExpr(ConstantExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
if (const auto *OCED = dyn_cast<OMPCapturedExprDecl>(Node->getDecl())) {
OCED->getInit()->IgnoreImpCasts()->printPretty(OS, nullptr, Policy);
@@ -1378,7 +949,7 @@ static bool isImplicitSelf(const Expr *E) {
if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
if (const auto *PD = dyn_cast<ImplicitParamDecl>(DRE->getDecl())) {
if (PD->getParameterKind() == ImplicitParamDecl::ObjCSelf &&
- DRE->getLocStart().isInvalid())
+ DRE->getBeginLoc().isInvalid())
return true;
}
}
@@ -1424,7 +995,7 @@ void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
}
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
- OS << PredefinedExpr::getIdentTypeName(Node->getIdentType());
+ OS << PredefinedExpr::getIdentKindName(Node->getIdentKind());
}
void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
@@ -1668,6 +1239,9 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
else
OS << "__alignof";
break;
+ case UETT_PreferredAlignOf:
+ OS << "__alignof";
+ break;
case UETT_VecStep:
OS << "vec_step";
break;
@@ -2808,8 +2382,9 @@ void Stmt::dumpPretty(const ASTContext &Context) const {
void Stmt::printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation,
+ StringRef NL,
const ASTContext *Context) const {
- StmtPrinter P(OS, Helper, Policy, Indentation, Context);
+ StmtPrinter P(OS, Helper, Policy, Indentation, NL, Context);
P.Visit(const_cast<Stmt*>(this));
}
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 15653c4fd838..ec4dac03d497 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -189,7 +189,7 @@ namespace {
// store its nullness. Add a boolean here to match.
ID.AddBoolean(true);
}
- Hash.AddDeclarationName(Name);
+ Hash.AddDeclarationName(Name, TreatAsDecl);
}
void VisitIdentifierInfo(IdentifierInfo *II) override {
ID.AddBoolean(II);
@@ -467,6 +467,21 @@ void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
+void OMPClauseProfiler::VisitOMPUnifiedAddressClause(
+ const OMPUnifiedAddressClause *C) {}
+
+void OMPClauseProfiler::VisitOMPUnifiedSharedMemoryClause(
+ const OMPUnifiedSharedMemoryClause *C) {}
+
+void OMPClauseProfiler::VisitOMPReverseOffloadClause(
+ const OMPReverseOffloadClause *C) {}
+
+void OMPClauseProfiler::VisitOMPDynamicAllocatorsClause(
+ const OMPDynamicAllocatorsClause *C) {}
+
+void OMPClauseProfiler::VisitOMPAtomicDefaultMemOrderClause(
+ const OMPAtomicDefaultMemOrderClause *C) {}
+
void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
VistOMPClauseWithPreInit(C);
if (auto *S = C->getChunkSize())
@@ -984,6 +999,10 @@ void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
+void StmtProfiler::VisitConstantExpr(const ConstantExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
VisitExpr(S);
if (!Canonical)
@@ -998,7 +1017,7 @@ void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
VisitExpr(S);
- ID.AddInteger(S->getIdentType());
+ ID.AddInteger(S->getIdentKind());
}
void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
diff --git a/lib/AST/TextNodeDumper.cpp b/lib/AST/TextNodeDumper.cpp
new file mode 100644
index 000000000000..b51a9006226a
--- /dev/null
+++ b/lib/AST/TextNodeDumper.cpp
@@ -0,0 +1,1168 @@
+//===--- TextNodeDumper.cpp - Printing of AST nodes -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements AST dumping of components of individual AST nodes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TextNodeDumper.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/LocInfoType.h"
+
+using namespace clang;
+
+static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}
+
+template <typename T>
+static void dumpPreviousDeclImpl(raw_ostream &OS, const Mergeable<T> *D) {
+ const T *First = D->getFirstDecl();
+ if (First != D)
+ OS << " first " << First;
+}
+
+template <typename T>
+static void dumpPreviousDeclImpl(raw_ostream &OS, const Redeclarable<T> *D) {
+ const T *Prev = D->getPreviousDecl();
+ if (Prev)
+ OS << " prev " << Prev;
+}
+
+/// Dump the previous declaration in the redeclaration chain for a declaration,
+/// if any.
+static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ return dumpPreviousDeclImpl(OS, cast<DERIVED##Decl>(D));
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
+}
+
+TextNodeDumper::TextNodeDumper(raw_ostream &OS, bool ShowColors,
+ const SourceManager *SM,
+ const PrintingPolicy &PrintPolicy,
+ const comments::CommandTraits *Traits)
+ : TextTreeStructure(OS, ShowColors), OS(OS), ShowColors(ShowColors), SM(SM),
+ PrintPolicy(PrintPolicy), Traits(Traits) {}
+
+void TextNodeDumper::Visit(const comments::Comment *C,
+ const comments::FullComment *FC) {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, CommentColor);
+ OS << C->getCommentKindName();
+ }
+ dumpPointer(C);
+ dumpSourceRange(C->getSourceRange());
+
+ ConstCommentVisitor<TextNodeDumper, void,
+ const comments::FullComment *>::visit(C, FC);
+}
+
+void TextNodeDumper::Visit(const Attr *A) {
+ {
+ ColorScope Color(OS, ShowColors, AttrColor);
+
+ switch (A->getKind()) {
+#define ATTR(X) \
+ case attr::X: \
+ OS << #X; \
+ break;
+#include "clang/Basic/AttrList.inc"
+ }
+ OS << "Attr";
+ }
+ dumpPointer(A);
+ dumpSourceRange(A->getRange());
+ if (A->isInherited())
+ OS << " Inherited";
+ if (A->isImplicit())
+ OS << " Implicit";
+
+ ConstAttrVisitor<TextNodeDumper>::Visit(A);
+}
+
+void TextNodeDumper::Visit(const TemplateArgument &TA, SourceRange R,
+ const Decl *From, StringRef Label) {
+ OS << "TemplateArgument";
+ if (R.isValid())
+ dumpSourceRange(R);
+
+ if (From)
+ dumpDeclRef(From, Label);
+
+ ConstTemplateArgumentVisitor<TextNodeDumper>::Visit(TA);
+}
+
+void TextNodeDumper::Visit(const Stmt *Node) {
+ if (!Node) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+ {
+ ColorScope Color(OS, ShowColors, StmtColor);
+ OS << Node->getStmtClassName();
+ }
+ dumpPointer(Node);
+ dumpSourceRange(Node->getSourceRange());
+
+ if (const auto *E = dyn_cast<Expr>(Node)) {
+ dumpType(E->getType());
+
+ {
+ ColorScope Color(OS, ShowColors, ValueKindColor);
+ switch (E->getValueKind()) {
+ case VK_RValue:
+ break;
+ case VK_LValue:
+ OS << " lvalue";
+ break;
+ case VK_XValue:
+ OS << " xvalue";
+ break;
+ }
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, ObjectKindColor);
+ switch (E->getObjectKind()) {
+ case OK_Ordinary:
+ break;
+ case OK_BitField:
+ OS << " bitfield";
+ break;
+ case OK_ObjCProperty:
+ OS << " objcproperty";
+ break;
+ case OK_ObjCSubscript:
+ OS << " objcsubscript";
+ break;
+ case OK_VectorComponent:
+ OS << " vectorcomponent";
+ break;
+ }
+ }
+ }
+
+ ConstStmtVisitor<TextNodeDumper>::Visit(Node);
+}
+
+void TextNodeDumper::Visit(const Type *T) {
+ if (!T) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+ if (isa<LocInfoType>(T)) {
+ {
+ ColorScope Color(OS, ShowColors, TypeColor);
+ OS << "LocInfo Type";
+ }
+ dumpPointer(T);
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, TypeColor);
+ OS << T->getTypeClassName() << "Type";
+ }
+ dumpPointer(T);
+ OS << " ";
+ dumpBareType(QualType(T, 0), false);
+
+ QualType SingleStepDesugar =
+ T->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (SingleStepDesugar != QualType(T, 0))
+ OS << " sugar";
+
+ if (T->isDependentType())
+ OS << " dependent";
+ else if (T->isInstantiationDependentType())
+ OS << " instantiation_dependent";
+
+ if (T->isVariablyModifiedType())
+ OS << " variably_modified";
+ if (T->containsUnexpandedParameterPack())
+ OS << " contains_unexpanded_pack";
+ if (T->isFromAST())
+ OS << " imported";
+
+ TypeVisitor<TextNodeDumper>::Visit(T);
+}
+
+void TextNodeDumper::Visit(QualType T) {
+ OS << "QualType";
+ dumpPointer(T.getAsOpaquePtr());
+ OS << " ";
+ dumpBareType(T, false);
+ OS << " " << T.split().Quals.getAsString();
+}
+
+void TextNodeDumper::Visit(const Decl *D) {
+ if (!D) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << D->getDeclKindName() << "Decl";
+ }
+ dumpPointer(D);
+ if (D->getLexicalDeclContext() != D->getDeclContext())
+ OS << " parent " << cast<Decl>(D->getDeclContext());
+ dumpPreviousDecl(OS, D);
+ dumpSourceRange(D->getSourceRange());
+ OS << ' ';
+ dumpLocation(D->getLocation());
+ if (D->isFromASTFile())
+ OS << " imported";
+ if (Module *M = D->getOwningModule())
+ OS << " in " << M->getFullModuleName();
+ if (auto *ND = dyn_cast<NamedDecl>(D))
+ for (Module *M : D->getASTContext().getModulesWithMergedDefinition(
+ const_cast<NamedDecl *>(ND)))
+ AddChild([=] { OS << "also in " << M->getFullModuleName(); });
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->isHidden())
+ OS << " hidden";
+ if (D->isImplicit())
+ OS << " implicit";
+
+ if (D->isUsed())
+ OS << " used";
+ else if (D->isThisDeclarationReferenced())
+ OS << " referenced";
+
+ if (D->isInvalidDecl())
+ OS << " invalid";
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isConstexpr())
+ OS << " constexpr";
+}
+
+void TextNodeDumper::Visit(const CXXCtorInitializer *Init) {
+ OS << "CXXCtorInitializer";
+ if (Init->isAnyMemberInitializer()) {
+ OS << ' ';
+ dumpBareDeclRef(Init->getAnyMember());
+ } else if (Init->isBaseInitializer()) {
+ dumpType(QualType(Init->getBaseClass(), 0));
+ } else if (Init->isDelegatingInitializer()) {
+ dumpType(Init->getTypeSourceInfo()->getType());
+ } else {
+ llvm_unreachable("Unknown initializer type");
+ }
+}
+
+void TextNodeDumper::Visit(const BlockDecl::Capture &C) {
+ OS << "capture";
+ if (C.isByRef())
+ OS << " byref";
+ if (C.isNested())
+ OS << " nested";
+ if (C.getVariable()) {
+ OS << ' ';
+ dumpBareDeclRef(C.getVariable());
+ }
+}
+
+void TextNodeDumper::Visit(const OMPClause *C) {
+ if (!C) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> OMPClause";
+ return;
+ }
+ {
+ ColorScope Color(OS, ShowColors, AttrColor);
+ StringRef ClauseName(getOpenMPClauseName(C->getClauseKind()));
+ OS << "OMP" << ClauseName.substr(/*Start=*/0, /*N=*/1).upper()
+ << ClauseName.drop_front() << "Clause";
+ }
+ dumpPointer(C);
+ dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc()));
+ if (C->isImplicit())
+ OS << " <implicit>";
+}
+
+void TextNodeDumper::dumpPointer(const void *Ptr) {
+ ColorScope Color(OS, ShowColors, AddressColor);
+ OS << ' ' << Ptr;
+}
+
+void TextNodeDumper::dumpLocation(SourceLocation Loc) {
+ if (!SM)
+ return;
+
+ ColorScope Color(OS, ShowColors, LocationColor);
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
+ << PLoc.getColumn();
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ OS << "line" << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ LastLocLine = PLoc.getLine();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+}
+
+void TextNodeDumper::dumpSourceRange(SourceRange R) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (!SM)
+ return;
+
+ OS << " <";
+ dumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ OS << ", ";
+ dumpLocation(R.getEnd());
+ }
+ OS << ">";
+
+ // <t2.c:123:421[blah], t2.c:412:321>
+}
+
+void TextNodeDumper::dumpBareType(QualType T, bool Desugar) {
+ ColorScope Color(OS, ShowColors, TypeColor);
+
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
+
+ if (Desugar && !T.isNull()) {
+ // If the type is sugared, also dump a (shallow) desugared type.
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
+ }
+}
+
+void TextNodeDumper::dumpType(QualType T) {
+ OS << ' ';
+ dumpBareType(T);
+}
+
+void TextNodeDumper::dumpBareDeclRef(const Decl *D) {
+ if (!D) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << D->getDeclKindName();
+ }
+ dumpPointer(D);
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ ColorScope Color(OS, ShowColors, DeclNameColor);
+ OS << " '" << ND->getDeclName() << '\'';
+ }
+
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ dumpType(VD->getType());
+}
+
+void TextNodeDumper::dumpName(const NamedDecl *ND) {
+ if (ND->getDeclName()) {
+ ColorScope Color(OS, ShowColors, DeclNameColor);
+ OS << ' ' << ND->getNameAsString();
+ }
+}
+
+void TextNodeDumper::dumpAccessSpecifier(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ break;
+ case AS_public:
+ OS << "public";
+ break;
+ case AS_protected:
+ OS << "protected";
+ break;
+ case AS_private:
+ OS << "private";
+ break;
+ }
+}
+
+void TextNodeDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
+ OS << "(CXXTemporary";
+ dumpPointer(Temporary);
+ OS << ")";
+}
+
+void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
+ if (!D)
+ return;
+
+ AddChild([=] {
+ if (!Label.empty())
+ OS << Label << ' ';
+ dumpBareDeclRef(D);
+ });
+}
+
+const char *TextNodeDumper::getCommandName(unsigned CommandID) {
+ if (Traits)
+ return Traits->getCommandInfo(CommandID)->Name;
+ const comments::CommandInfo *Info =
+ comments::CommandTraits::getBuiltinCommandInfo(CommandID);
+ if (Info)
+ return Info->Name;
+ return "<not a builtin command>";
+}
+
+void TextNodeDumper::visitTextComment(const comments::TextComment *C,
+ const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void TextNodeDumper::visitInlineCommandComment(
+ const comments::InlineCommandComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ switch (C->getRenderKind()) {
+ case comments::InlineCommandComment::RenderNormal:
+ OS << " RenderNormal";
+ break;
+ case comments::InlineCommandComment::RenderBold:
+ OS << " RenderBold";
+ break;
+ case comments::InlineCommandComment::RenderMonospaced:
+ OS << " RenderMonospaced";
+ break;
+ case comments::InlineCommandComment::RenderEmphasized:
+ OS << " RenderEmphasized";
+ break;
+ }
+
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void TextNodeDumper::visitHTMLStartTagComment(
+ const comments::HTMLStartTagComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+ if (C->getNumAttrs() != 0) {
+ OS << " Attrs: ";
+ for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
+ const comments::HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
+ OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
+ }
+ }
+ if (C->isSelfClosing())
+ OS << " SelfClosing";
+}
+
+void TextNodeDumper::visitHTMLEndTagComment(
+ const comments::HTMLEndTagComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << C->getTagName() << "\"";
+}
+
+void TextNodeDumper::visitBlockCommandComment(
+ const comments::BlockCommandComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
+ for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
+ OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
+}
+
+void TextNodeDumper::visitParamCommandComment(
+ const comments::ParamCommandComment *C, const comments::FullComment *FC) {
+ OS << " "
+ << comments::ParamCommandComment::getDirectionAsString(C->getDirection());
+
+ if (C->isDirectionExplicit())
+ OS << " explicitly";
+ else
+ OS << " implicitly";
+
+ if (C->hasParamName()) {
+ if (C->isParamIndexValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isParamIndexValid() && !C->isVarArgParam())
+ OS << " ParamIndex=" << C->getParamIndex();
+}
+
+void TextNodeDumper::visitTParamCommandComment(
+ const comments::TParamCommandComment *C, const comments::FullComment *FC) {
+ if (C->hasParamName()) {
+ if (C->isPositionValid())
+ OS << " Param=\"" << C->getParamName(FC) << "\"";
+ else
+ OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
+ }
+
+ if (C->isPositionValid()) {
+ OS << " Position=<";
+ for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
+ OS << C->getIndex(i);
+ if (i != e - 1)
+ OS << ", ";
+ }
+ OS << ">";
+ }
+}
+
+void TextNodeDumper::visitVerbatimBlockComment(
+ const comments::VerbatimBlockComment *C, const comments::FullComment *) {
+ OS << " Name=\"" << getCommandName(C->getCommandID())
+ << "\""
+ " CloseName=\""
+ << C->getCloseName() << "\"";
+}
+
+void TextNodeDumper::visitVerbatimBlockLineComment(
+ const comments::VerbatimBlockLineComment *C,
+ const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void TextNodeDumper::visitVerbatimLineComment(
+ const comments::VerbatimLineComment *C, const comments::FullComment *) {
+ OS << " Text=\"" << C->getText() << "\"";
+}
+
+void TextNodeDumper::VisitNullTemplateArgument(const TemplateArgument &) {
+ OS << " null";
+}
+
+void TextNodeDumper::VisitTypeTemplateArgument(const TemplateArgument &TA) {
+ OS << " type";
+ dumpType(TA.getAsType());
+}
+
+void TextNodeDumper::VisitDeclarationTemplateArgument(
+ const TemplateArgument &TA) {
+ OS << " decl";
+ dumpDeclRef(TA.getAsDecl());
+}
+
+void TextNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &) {
+ OS << " nullptr";
+}
+
+void TextNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) {
+ OS << " integral " << TA.getAsIntegral();
+}
+
+void TextNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) {
+ OS << " template ";
+ TA.getAsTemplate().dump(OS);
+}
+
+void TextNodeDumper::VisitTemplateExpansionTemplateArgument(
+ const TemplateArgument &TA) {
+ OS << " template expansion ";
+ TA.getAsTemplateOrTemplatePattern().dump(OS);
+}
+
+void TextNodeDumper::VisitExpressionTemplateArgument(const TemplateArgument &) {
+ OS << " expr";
+}
+
+void TextNodeDumper::VisitPackTemplateArgument(const TemplateArgument &) {
+ OS << " pack";
+}
+
+static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
+ if (Node->path_empty())
+ return;
+
+ OS << " (";
+ bool First = true;
+ for (CastExpr::path_const_iterator I = Node->path_begin(),
+ E = Node->path_end();
+ I != E; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ if (!First)
+ OS << " -> ";
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual())
+ OS << "virtual ";
+ OS << RD->getName();
+ First = false;
+ }
+
+ OS << ')';
+}
+
+void TextNodeDumper::VisitIfStmt(const IfStmt *Node) {
+ if (Node->hasInitStorage())
+ OS << " has_init";
+ if (Node->hasVarStorage())
+ OS << " has_var";
+ if (Node->hasElseStorage())
+ OS << " has_else";
+}
+
+void TextNodeDumper::VisitSwitchStmt(const SwitchStmt *Node) {
+ if (Node->hasInitStorage())
+ OS << " has_init";
+ if (Node->hasVarStorage())
+ OS << " has_var";
+}
+
+void TextNodeDumper::VisitWhileStmt(const WhileStmt *Node) {
+ if (Node->hasVarStorage())
+ OS << " has_var";
+}
+
+void TextNodeDumper::VisitLabelStmt(const LabelStmt *Node) {
+ OS << " '" << Node->getName() << "'";
+}
+
+void TextNodeDumper::VisitGotoStmt(const GotoStmt *Node) {
+ OS << " '" << Node->getLabel()->getName() << "'";
+ dumpPointer(Node->getLabel());
+}
+
+void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) {
+ if (Node->caseStmtIsGNURange())
+ OS << " gnu_range";
+}
+
+void TextNodeDumper::VisitCallExpr(const CallExpr *Node) {
+ if (Node->usesADL())
+ OS << " adl";
+}
+
+void TextNodeDumper::VisitCastExpr(const CastExpr *Node) {
+ OS << " <";
+ {
+ ColorScope Color(OS, ShowColors, CastColor);
+ OS << Node->getCastKindName();
+ }
+ dumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void TextNodeDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
+ VisitCastExpr(Node);
+ if (Node->isPartOfExplicitCast())
+ OS << " part_of_explicit_cast";
+}
+
+void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
+ OS << " ";
+ dumpBareDeclRef(Node->getDecl());
+ if (Node->getDecl() != Node->getFoundDecl()) {
+ OS << " (";
+ dumpBareDeclRef(Node->getFoundDecl());
+ OS << ")";
+ }
+}
+
+void TextNodeDumper::VisitUnresolvedLookupExpr(
+ const UnresolvedLookupExpr *Node) {
+ OS << " (";
+ if (!Node->requiresADL())
+ OS << "no ";
+ OS << "ADL) = '" << Node->getName() << '\'';
+
+ UnresolvedLookupExpr::decls_iterator I = Node->decls_begin(),
+ E = Node->decls_end();
+ if (I == E)
+ OS << " empty";
+ for (; I != E; ++I)
+ dumpPointer(*I);
+}
+
+void TextNodeDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << " " << Node->getDecl()->getDeclKindName() << "Decl";
+ }
+ OS << "='" << *Node->getDecl() << "'";
+ dumpPointer(Node->getDecl());
+ if (Node->isFreeIvar())
+ OS << " isFreeIvar";
+}
+
+void TextNodeDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
+ OS << " " << PredefinedExpr::getIdentKindName(Node->getIdentKind());
+}
+
+void TextNodeDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " " << Node->getValue();
+}
+
+void TextNodeDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " " << Node->getValue().toString(10, isSigned);
+}
+
+void TextNodeDumper::VisitFixedPointLiteral(const FixedPointLiteral *Node) {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " " << Node->getValueAsString(/*Radix=*/10);
+}
+
+void TextNodeDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " " << Node->getValueAsApproximateDouble();
+}
+
+void TextNodeDumper::VisitStringLiteral(const StringLiteral *Str) {
+ ColorScope Color(OS, ShowColors, ValueColor);
+ OS << " ";
+ Str->outputString(OS);
+}
+
+void TextNodeDumper::VisitInitListExpr(const InitListExpr *ILE) {
+ if (auto *Field = ILE->getInitializedFieldInUnion()) {
+ OS << " field ";
+ dumpBareDeclRef(Field);
+ }
+}
+
+void TextNodeDumper::VisitUnaryOperator(const UnaryOperator *Node) {
+ OS << " " << (Node->isPostfix() ? "postfix" : "prefix") << " '"
+ << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+ if (!Node->canOverflow())
+ OS << " cannot overflow";
+}
+
+void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *Node) {
+ switch (Node->getKind()) {
+ case UETT_SizeOf:
+ OS << " sizeof";
+ break;
+ case UETT_AlignOf:
+ OS << " alignof";
+ break;
+ case UETT_VecStep:
+ OS << " vec_step";
+ break;
+ case UETT_OpenMPRequiredSimdAlign:
+ OS << " __builtin_omp_required_simd_align";
+ break;
+ case UETT_PreferredAlignOf:
+ OS << " __alignof";
+ break;
+ }
+ if (Node->isArgumentType())
+ dumpType(Node->getArgumentType());
+}
+
+void TextNodeDumper::VisitMemberExpr(const MemberExpr *Node) {
+ OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
+ dumpPointer(Node->getMemberDecl());
+}
+
+void TextNodeDumper::VisitExtVectorElementExpr(
+ const ExtVectorElementExpr *Node) {
+ OS << " " << Node->getAccessor().getNameStart();
+}
+
+void TextNodeDumper::VisitBinaryOperator(const BinaryOperator *Node) {
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+
+void TextNodeDumper::VisitCompoundAssignOperator(
+ const CompoundAssignOperator *Node) {
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
+ << "' ComputeLHSTy=";
+ dumpBareType(Node->getComputationLHSType());
+ OS << " ComputeResultTy=";
+ dumpBareType(Node->getComputationResultType());
+}
+
+void TextNodeDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
+ OS << " " << Node->getLabel()->getName();
+ dumpPointer(Node->getLabel());
+}
+
+void TextNodeDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
+ OS << " " << Node->getCastName() << "<"
+ << Node->getTypeAsWritten().getAsString() << ">"
+ << " <" << Node->getCastKindName();
+ dumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void TextNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
+ OS << " " << (Node->getValue() ? "true" : "false");
+}
+
+void TextNodeDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
+ OS << " this";
+}
+
+void TextNodeDumper::VisitCXXFunctionalCastExpr(
+ const CXXFunctionalCastExpr *Node) {
+ OS << " functional cast to " << Node->getTypeAsWritten().getAsString() << " <"
+ << Node->getCastKindName() << ">";
+}
+
+void TextNodeDumper::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *Node) {
+ dumpType(Node->getTypeAsWritten());
+ if (Node->isListInitialization())
+ OS << " list";
+}
+
+void TextNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
+ CXXConstructorDecl *Ctor = Node->getConstructor();
+ dumpType(Ctor->getType());
+ if (Node->isElidable())
+ OS << " elidable";
+ if (Node->isListInitialization())
+ OS << " list";
+ if (Node->isStdInitListInitialization())
+ OS << " std::initializer_list";
+ if (Node->requiresZeroInitialization())
+ OS << " zeroing";
+}
+
+void TextNodeDumper::VisitCXXBindTemporaryExpr(
+ const CXXBindTemporaryExpr *Node) {
+ OS << " ";
+ dumpCXXTemporary(Node->getTemporary());
+}
+
+void TextNodeDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
+ if (Node->isGlobalNew())
+ OS << " global";
+ if (Node->isArray())
+ OS << " array";
+ if (Node->getOperatorNew()) {
+ OS << ' ';
+ dumpBareDeclRef(Node->getOperatorNew());
+ }
+ // We could dump the deallocation function used in case of error, but it's
+ // usually not that interesting.
+}
+
+void TextNodeDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
+ if (Node->isGlobalDelete())
+ OS << " global";
+ if (Node->isArrayForm())
+ OS << " array";
+ if (Node->getOperatorDelete()) {
+ OS << ' ';
+ dumpBareDeclRef(Node->getOperatorDelete());
+ }
+}
+
+void TextNodeDumper::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *Node) {
+ if (const ValueDecl *VD = Node->getExtendingDecl()) {
+ OS << " extended by ";
+ dumpBareDeclRef(VD);
+ }
+}
+
+void TextNodeDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
+ for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
+ dumpDeclRef(Node->getObject(i), "cleanup");
+}
+
+void TextNodeDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
+ dumpPointer(Node->getPack());
+ dumpName(Node->getPack());
+}
+
+void TextNodeDumper::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *Node) {
+ OS << " " << (Node->isArrow() ? "->" : ".") << Node->getMember();
+}
+
+void TextNodeDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
+ OS << " selector=";
+ Node->getSelector().print(OS);
+ switch (Node->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << " class=";
+ dumpBareType(Node->getClassReceiver());
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ OS << " super (instance)";
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ OS << " super (class)";
+ break;
+ }
+}
+
+void TextNodeDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
+ if (auto *BoxingMethod = Node->getBoxingMethod()) {
+ OS << " selector=";
+ BoxingMethod->getSelector().print(OS);
+ }
+}
+
+void TextNodeDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
+ if (!Node->getCatchParamDecl())
+ OS << " catch all";
+}
+
+void TextNodeDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) {
+ dumpType(Node->getEncodedType());
+}
+
+void TextNodeDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
+ OS << " ";
+ Node->getSelector().print(OS);
+}
+
+void TextNodeDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
+ OS << ' ' << *Node->getProtocol();
+}
+
+void TextNodeDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
+ if (Node->isImplicitProperty()) {
+ OS << " Kind=MethodRef Getter=\"";
+ if (Node->getImplicitPropertyGetter())
+ Node->getImplicitPropertyGetter()->getSelector().print(OS);
+ else
+ OS << "(null)";
+
+ OS << "\" Setter=\"";
+ if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
+ Setter->getSelector().print(OS);
+ else
+ OS << "(null)";
+ OS << "\"";
+ } else {
+ OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty()
+ << '"';
+ }
+
+ if (Node->isSuperReceiver())
+ OS << " super";
+
+ OS << " Messaging=";
+ if (Node->isMessagingGetter() && Node->isMessagingSetter())
+ OS << "Getter&Setter";
+ else if (Node->isMessagingGetter())
+ OS << "Getter";
+ else if (Node->isMessagingSetter())
+ OS << "Setter";
+}
+
+void TextNodeDumper::VisitObjCSubscriptRefExpr(
+ const ObjCSubscriptRefExpr *Node) {
+ if (Node->isArraySubscriptRefExpr())
+ OS << " Kind=ArraySubscript GetterForArray=\"";
+ else
+ OS << " Kind=DictionarySubscript GetterForDictionary=\"";
+ if (Node->getAtIndexMethodDecl())
+ Node->getAtIndexMethodDecl()->getSelector().print(OS);
+ else
+ OS << "(null)";
+
+ if (Node->isArraySubscriptRefExpr())
+ OS << "\" SetterForArray=\"";
+ else
+ OS << "\" SetterForDictionary=\"";
+ if (Node->setAtIndexMethodDecl())
+ Node->setAtIndexMethodDecl()->getSelector().print(OS);
+ else
+ OS << "(null)";
+}
+
+void TextNodeDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
+ OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
+ if (T->isSpelledAsLValue())
+ OS << " written as lvalue reference";
+}
+
+void TextNodeDumper::VisitArrayType(const ArrayType *T) {
+ switch (T->getSizeModifier()) {
+ case ArrayType::Normal:
+ break;
+ case ArrayType::Static:
+ OS << " static";
+ break;
+ case ArrayType::Star:
+ OS << " *";
+ break;
+ }
+ OS << " " << T->getIndexTypeQualifiers().getAsString();
+}
+
+void TextNodeDumper::VisitConstantArrayType(const ConstantArrayType *T) {
+ OS << " " << T->getSize();
+ VisitArrayType(T);
+}
+
+void TextNodeDumper::VisitVariableArrayType(const VariableArrayType *T) {
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+ VisitArrayType(T);
+}
+
+void TextNodeDumper::VisitDependentSizedArrayType(
+ const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ OS << " ";
+ dumpSourceRange(T->getBracketsRange());
+}
+
+void TextNodeDumper::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ OS << " ";
+ dumpLocation(T->getAttributeLoc());
+}
+
+void TextNodeDumper::VisitVectorType(const VectorType *T) {
+ switch (T->getVectorKind()) {
+ case VectorType::GenericVector:
+ break;
+ case VectorType::AltiVecVector:
+ OS << " altivec";
+ break;
+ case VectorType::AltiVecPixel:
+ OS << " altivec pixel";
+ break;
+ case VectorType::AltiVecBool:
+ OS << " altivec bool";
+ break;
+ case VectorType::NeonVector:
+ OS << " neon";
+ break;
+ case VectorType::NeonPolyVector:
+ OS << " neon poly";
+ break;
+ }
+ OS << " " << T->getNumElements();
+}
+
+void TextNodeDumper::VisitFunctionType(const FunctionType *T) {
+ auto EI = T->getExtInfo();
+ if (EI.getNoReturn())
+ OS << " noreturn";
+ if (EI.getProducesResult())
+ OS << " produces_result";
+ if (EI.getHasRegParm())
+ OS << " regparm " << EI.getRegParm();
+ OS << " " << FunctionType::getNameForCallConv(EI.getCC());
+}
+
+void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
+ auto EPI = T->getExtProtoInfo();
+ if (EPI.HasTrailingReturn)
+ OS << " trailing_return";
+ if (T->isConst())
+ OS << " const";
+ if (T->isVolatile())
+ OS << " volatile";
+ if (T->isRestrict())
+ OS << " restrict";
+ switch (EPI.RefQualifier) {
+ case RQ_None:
+ break;
+ case RQ_LValue:
+ OS << " &";
+ break;
+ case RQ_RValue:
+ OS << " &&";
+ break;
+ }
+ // FIXME: Exception specification.
+ // FIXME: Consumed parameters.
+ VisitFunctionType(T);
+}
+
+void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitTypedefType(const TypedefType *T) {
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ OS << " underlying_type";
+ break;
+ }
+}
+
+void TextNodeDumper::VisitTagType(const TagType *T) {
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ OS << " depth " << T->getDepth() << " index " << T->getIndex();
+ if (T->isParameterPack())
+ OS << " pack";
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitAutoType(const AutoType *T) {
+ if (T->isDecltypeAuto())
+ OS << " decltype(auto)";
+ if (!T->isDeduced())
+ OS << " undeduced";
+}
+
+void TextNodeDumper::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ if (T->isTypeAlias())
+ OS << " alias";
+ OS << " ";
+ T->getTemplateName().dump(OS);
+}
+
+void TextNodeDumper::VisitInjectedClassNameType(
+ const InjectedClassNameType *T) {
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ dumpDeclRef(T->getDecl());
+}
+
+void TextNodeDumper::VisitPackExpansionType(const PackExpansionType *T) {
+ if (auto N = T->getNumExpansions())
+ OS << " expansions " << *N;
+}
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index f79a59712a41..0dbc88c04521 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -291,6 +291,14 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
return Context.getQualifiedType(desugar, split.Quals);
}
+// Check that no type class is polymorphic. LLVM style RTTI should be used
+// instead. If absolutely needed an exception can still be added here by
+// defining the appropriate macro (but please don't do this).
+#define TYPE(CLASS, BASE) \
+ static_assert(!std::is_polymorphic<CLASS##Type>::value, \
+ #CLASS "Type should not be polymorphic!");
+#include "clang/AST/TypeNodes.def"
+
QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
switch (getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
@@ -592,28 +600,6 @@ bool Type::isObjCClassOrClassKindOfType() const {
return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
}
-/// Was this type written with the special inert-in-MRC __unsafe_unretained
-/// qualifier?
-///
-/// This approximates the answer to the following question: if this
-/// translation unit were compiled in ARC, would this type be qualified
-/// with __unsafe_unretained?
-bool Type::isObjCInertUnsafeUnretainedType() const {
- const Type *cur = this;
- while (true) {
- if (const auto attributed = dyn_cast<AttributedType>(cur)) {
- if (attributed->getAttrKind() ==
- AttributedType::attr_objc_inert_unsafe_unretained)
- return true;
- }
-
- // Single-step desugar until we run out of sugar.
- QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType();
- if (next.getTypePtr() == cur) return false;
- cur = next.getTypePtr();
- }
-}
-
ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D,
QualType can,
ArrayRef<ObjCProtocolDecl *> protocols)
@@ -1641,6 +1627,16 @@ TagDecl *Type::getAsTagDecl() const {
return nullptr;
}
+bool Type::hasAttr(attr::Kind AK) const {
+ const Type *Cur = this;
+ while (const auto *AT = Cur->getAs<AttributedType>()) {
+ if (AT->getAttrKind() == AK)
+ return true;
+ Cur = AT->getEquivalentType().getTypePtr();
+ }
+ return false;
+}
+
namespace {
class GetContainedDeducedTypeVisitor :
@@ -1977,6 +1973,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
if (BT->isInteger()) return STK_Integral;
if (BT->isFloatingPoint()) return STK_Floating;
+ if (BT->isFixedPointType()) return STK_FixedPoint;
llvm_unreachable("unknown scalar builtin type");
} else if (isa<PointerType>(T)) {
return STK_CPointer;
@@ -2604,7 +2601,8 @@ DependentTemplateSpecializationType::DependentTemplateSpecializationType(
: TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
/*VariablyModified=*/false,
NNS && NNS->containsUnexpandedParameterPack()),
- NNS(NNS), Name(Name), NumArgs(Args.size()) {
+ NNS(NNS), Name(Name) {
+ DependentTemplateSpecializationTypeBits.NumArgs = Args.size();
assert((!NNS || NNS->isDependent()) &&
"DependentTemplateSpecializatonType requires dependent qualifier");
TemplateArgument *ArgBuffer = getArgBuffer();
@@ -2796,6 +2794,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "reserve_id_t";
case OMPArraySection:
return "<OpenMP array section type>";
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case Id: \
+ return #ExtType;
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -2830,6 +2832,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86RegCall : return "regcall";
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
+ case CC_AArch64VectorCall: return "aarch64_vector_pcs";
case CC_IntelOclBicc: return "intel_ocl_bicc";
case CC_SpirFunction: return "spir_function";
case CC_OpenCLKernel: return "opencl_kernel";
@@ -2844,24 +2847,28 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
- : FunctionType(FunctionProto, result, canonical,
- result->isDependentType(),
+ : FunctionType(FunctionProto, result, canonical, result->isDependentType(),
result->isInstantiationDependentType(),
result->isVariablyModifiedType(),
- result->containsUnexpandedParameterPack(), epi.ExtInfo),
- NumParams(params.size()),
- NumExceptions(epi.ExceptionSpec.Exceptions.size()),
- ExceptionSpecType(epi.ExceptionSpec.Type),
- HasExtParameterInfos(epi.ExtParameterInfos != nullptr),
- Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn) {
- assert(NumParams == params.size() && "function has too many parameters");
-
- FunctionTypeBits.TypeQuals = epi.TypeQuals;
+ result->containsUnexpandedParameterPack(), epi.ExtInfo) {
+ FunctionTypeBits.FastTypeQuals = epi.TypeQuals.getFastQualifiers();
FunctionTypeBits.RefQualifier = epi.RefQualifier;
+ FunctionTypeBits.NumParams = params.size();
+ assert(getNumParams() == params.size() && "NumParams overflow!");
+ FunctionTypeBits.ExceptionSpecType = epi.ExceptionSpec.Type;
+ FunctionTypeBits.HasExtParameterInfos = !!epi.ExtParameterInfos;
+ FunctionTypeBits.Variadic = epi.Variadic;
+ FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn;
+
+ // Fill in the extra trailing bitfields if present.
+ if (hasExtraBitfields(epi.ExceptionSpec.Type)) {
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+ }
// Fill in the trailing argument array.
- auto *argSlot = reinterpret_cast<QualType *>(this+1);
- for (unsigned i = 0; i != NumParams; ++i) {
+ auto *argSlot = getTrailingObjects<QualType>();
+ for (unsigned i = 0; i != getNumParams(); ++i) {
if (params[i]->isDependentType())
setDependent();
else if (params[i]->isInstantiationDependentType())
@@ -2873,9 +2880,11 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
argSlot[i] = params[i];
}
+ // Fill in the exception type array if present.
if (getExceptionSpecType() == EST_Dynamic) {
- // Fill in the exception array.
- QualType *exnSlot = argSlot + NumParams;
+ assert(hasExtraBitfields() && "missing trailing extra bitfields!");
+ auto *exnSlot =
+ reinterpret_cast<QualType *>(getTrailingObjects<ExceptionType>());
unsigned I = 0;
for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
// Note that, before C++17, a dependent exception specification does
@@ -2889,14 +2898,15 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
exnSlot[I++] = ExceptionType;
}
- } else if (isComputedNoexcept(getExceptionSpecType())) {
+ }
+ // Fill in the Expr * in the exception specification if present.
+ else if (isComputedNoexcept(getExceptionSpecType())) {
assert(epi.ExceptionSpec.NoexceptExpr && "computed noexcept with no expr");
assert((getExceptionSpecType() == EST_DependentNoexcept) ==
epi.ExceptionSpec.NoexceptExpr->isValueDependent());
// Store the noexcept expression and context.
- auto **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
- *noexSlot = epi.ExceptionSpec.NoexceptExpr;
+ *getTrailingObjects<Expr *>() = epi.ExceptionSpec.NoexceptExpr;
if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
@@ -2904,10 +2914,12 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
- } else if (getExceptionSpecType() == EST_Uninstantiated) {
+ }
+ // Fill in the FunctionDecl * in the exception specification if present.
+ else if (getExceptionSpecType() == EST_Uninstantiated) {
// Store the function decl from which we will resolve our
// exception specification.
- auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = getTrailingObjects<FunctionDecl *>();
slot[0] = epi.ExceptionSpec.SourceDecl;
slot[1] = epi.ExceptionSpec.SourceTemplate;
// This exception specification doesn't make the type dependent, because
@@ -2915,7 +2927,7 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
} else if (getExceptionSpecType() == EST_Unevaluated) {
// Store the function decl from which we will resolve our
// exception specification.
- auto **slot = reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
+ auto **slot = getTrailingObjects<FunctionDecl *>();
slot[0] = epi.ExceptionSpec.SourceDecl;
}
@@ -2932,12 +2944,19 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
setDependent();
}
+ // Fill in the extra parameter info if present.
if (epi.ExtParameterInfos) {
- auto *extParamInfos =
- const_cast<ExtParameterInfo *>(getExtParameterInfosBuffer());
- for (unsigned i = 0; i != NumParams; ++i)
+ auto *extParamInfos = getTrailingObjects<ExtParameterInfo>();
+ for (unsigned i = 0; i != getNumParams(); ++i)
extParamInfos[i] = epi.ExtParameterInfos[i];
}
+
+ if (epi.TypeQuals.hasNonFastQualifiers()) {
+ FunctionTypeBits.HasExtQuals = 1;
+ *getTrailingObjects<Qualifiers>() = epi.TypeQuals;
+ } else {
+ FunctionTypeBits.HasExtQuals = 0;
+ }
}
bool FunctionProtoType::hasDependentExceptionSpec() const {
@@ -2981,7 +3000,7 @@ CanThrowResult FunctionProtoType::canThrow() const {
case EST_Dynamic:
// A dynamic exception specification is throwing unless every exception
// type is an (unexpanded) pack expansion type.
- for (unsigned I = 0, N = NumExceptions; I != N; ++I)
+ for (unsigned I = 0; I != getNumExceptions(); ++I)
if (!getExceptionType(I)->getAs<PackExpansionType>())
return CT_Can;
return CT_Dependent;
@@ -3029,14 +3048,13 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// shortcut, use one AddInteger call instead of four for the next four
// fields.
assert(!(unsigned(epi.Variadic) & ~1) &&
- !(unsigned(epi.TypeQuals) & ~255) &&
!(unsigned(epi.RefQualifier) & ~3) &&
!(unsigned(epi.ExceptionSpec.Type) & ~15) &&
"Values larger than expected.");
ID.AddInteger(unsigned(epi.Variadic) +
- (epi.TypeQuals << 1) +
- (epi.RefQualifier << 9) +
- (epi.ExceptionSpec.Type << 11));
+ (epi.RefQualifier << 1) +
+ (epi.ExceptionSpec.Type << 3));
+ ID.Add(epi.TypeQuals);
if (epi.ExceptionSpec.Type == EST_Dynamic) {
for (QualType Ex : epi.ExceptionSpec.Exceptions)
ID.AddPointer(Ex.getAsOpaquePtr());
@@ -3056,8 +3074,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Ctx) {
- Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
- Ctx, isCanonicalUnqualified());
+ Profile(ID, getReturnType(), param_type_begin(), getNumParams(),
+ getExtProtoInfo(), Ctx, isCanonicalUnqualified());
}
QualType TypedefType::desugar() const {
@@ -3154,118 +3172,81 @@ bool TagType::isBeingDefined() const {
}
bool RecordType::hasConstFields() const {
- for (FieldDecl *FD : getDecl()->fields()) {
- QualType FieldTy = FD->getType();
- if (FieldTy.isConstQualified())
- return true;
- FieldTy = FieldTy.getCanonicalType();
- if (const auto *FieldRecTy = FieldTy->getAs<RecordType>())
- if (FieldRecTy->hasConstFields())
+ std::vector<const RecordType*> RecordTypeList;
+ RecordTypeList.push_back(this);
+ unsigned NextToCheckIndex = 0;
+
+ while (RecordTypeList.size() > NextToCheckIndex) {
+ for (FieldDecl *FD :
+ RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ QualType FieldTy = FD->getType();
+ if (FieldTy.isConstQualified())
return true;
+ FieldTy = FieldTy.getCanonicalType();
+ if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ RecordTypeList.push_back(FieldRecTy);
+ }
+ }
+ ++NextToCheckIndex;
}
return false;
}
bool AttributedType::isQualifier() const {
+ // FIXME: Generate this with TableGen.
switch (getAttrKind()) {
// These are type qualifiers in the traditional C sense: they annotate
// something about a specific value/variable of a type. (They aren't
// always part of the canonical type, though.)
- case AttributedType::attr_address_space:
- case AttributedType::attr_objc_gc:
- case AttributedType::attr_objc_ownership:
- case AttributedType::attr_objc_inert_unsafe_unretained:
- case AttributedType::attr_nonnull:
- case AttributedType::attr_nullable:
- case AttributedType::attr_null_unspecified:
- case AttributedType::attr_lifetimebound:
+ case attr::ObjCGC:
+ case attr::ObjCOwnership:
+ case attr::ObjCInertUnsafeUnretained:
+ case attr::TypeNonNull:
+ case attr::TypeNullable:
+ case attr::TypeNullUnspecified:
+ case attr::LifetimeBound:
return true;
- // These aren't qualifiers; they rewrite the modified type to be a
- // semantically different type.
- case AttributedType::attr_regparm:
- case AttributedType::attr_vector_size:
- case AttributedType::attr_neon_vector_type:
- case AttributedType::attr_neon_polyvector_type:
- case AttributedType::attr_pcs:
- case AttributedType::attr_pcs_vfp:
- case AttributedType::attr_noreturn:
- case AttributedType::attr_cdecl:
- case AttributedType::attr_fastcall:
- case AttributedType::attr_stdcall:
- case AttributedType::attr_thiscall:
- case AttributedType::attr_regcall:
- case AttributedType::attr_pascal:
- case AttributedType::attr_swiftcall:
- case AttributedType::attr_vectorcall:
- case AttributedType::attr_inteloclbicc:
- case AttributedType::attr_preserve_most:
- case AttributedType::attr_preserve_all:
- case AttributedType::attr_ms_abi:
- case AttributedType::attr_sysv_abi:
- case AttributedType::attr_ptr32:
- case AttributedType::attr_ptr64:
- case AttributedType::attr_sptr:
- case AttributedType::attr_uptr:
- case AttributedType::attr_objc_kindof:
- case AttributedType::attr_ns_returns_retained:
- case AttributedType::attr_nocf_check:
+ // All other type attributes aren't qualifiers; they rewrite the modified
+ // type to be a semantically different type.
+ default:
return false;
}
- llvm_unreachable("bad attributed type kind");
}
bool AttributedType::isMSTypeSpec() const {
+ // FIXME: Generate this with TableGen?
switch (getAttrKind()) {
- default: return false;
- case attr_ptr32:
- case attr_ptr64:
- case attr_sptr:
- case attr_uptr:
+ default: return false;
+ case attr::Ptr32:
+ case attr::Ptr64:
+ case attr::SPtr:
+ case attr::UPtr:
return true;
}
llvm_unreachable("invalid attr kind");
}
bool AttributedType::isCallingConv() const {
+ // FIXME: Generate this with TableGen.
switch (getAttrKind()) {
- case attr_ptr32:
- case attr_ptr64:
- case attr_sptr:
- case attr_uptr:
- case attr_address_space:
- case attr_regparm:
- case attr_vector_size:
- case attr_neon_vector_type:
- case attr_neon_polyvector_type:
- case attr_objc_gc:
- case attr_objc_ownership:
- case attr_objc_inert_unsafe_unretained:
- case attr_noreturn:
- case attr_nonnull:
- case attr_ns_returns_retained:
- case attr_nullable:
- case attr_null_unspecified:
- case attr_objc_kindof:
- case attr_nocf_check:
- case attr_lifetimebound:
- return false;
-
- case attr_pcs:
- case attr_pcs_vfp:
- case attr_cdecl:
- case attr_fastcall:
- case attr_stdcall:
- case attr_thiscall:
- case attr_regcall:
- case attr_swiftcall:
- case attr_vectorcall:
- case attr_pascal:
- case attr_ms_abi:
- case attr_sysv_abi:
- case attr_inteloclbicc:
- case attr_preserve_most:
- case attr_preserve_all:
+ default: return false;
+ case attr::Pcs:
+ case attr::CDecl:
+ case attr::FastCall:
+ case attr::StdCall:
+ case attr::ThisCall:
+ case attr::RegCall:
+ case attr::SwiftCall:
+ case attr::VectorCall:
+ case attr::AArch64VectorPcs:
+ case attr::Pascal:
+ case attr::MSABI:
+ case attr::SysVABI:
+ case attr::IntelOclBicc:
+ case attr::PreserveMost:
+ case attr::PreserveAll:
return true;
}
llvm_unreachable("invalid attr kind");
@@ -3284,11 +3265,12 @@ SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
QualType Canon,
const TemplateArgument &ArgPack)
: Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
- Replaced(Param),
- Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size()) {}
+ Replaced(Param), Arguments(ArgPack.pack_begin()) {
+ SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size();
+}
TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
- return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
+ return TemplateArgument(llvm::makeArrayRef(Arguments, getNumArgs()));
}
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
@@ -3335,8 +3317,10 @@ TemplateSpecializationType(TemplateName T,
Canon.isNull()? true : Canon->isDependentType(),
Canon.isNull()? true : Canon->isInstantiationDependentType(),
false,
- T.containsUnexpandedParameterPack()),
- Template(T), NumArgs(Args.size()), TypeAlias(!AliasedType.isNull()) {
+ T.containsUnexpandedParameterPack()), Template(T) {
+ TemplateSpecializationTypeBits.NumArgs = Args.size();
+ TemplateSpecializationTypeBits.TypeAlias = !AliasedType.isNull();
+
assert(!T.getAsDependentTemplateName() &&
"Use DependentTemplateSpecializationType for dependent template-name");
assert((T.getKind() == TemplateName::Template ||
@@ -3365,7 +3349,7 @@ TemplateSpecializationType(TemplateName T,
}
// Store the aliased type if this is a type alias template specialization.
- if (TypeAlias) {
+ if (isTypeAlias()) {
auto *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
*reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
}
@@ -3708,23 +3692,18 @@ LinkageInfo Type::getLinkageAndVisibility() const {
return LinkageComputer{}.getTypeLinkageAndVisibility(this);
}
-Optional<NullabilityKind> Type::getNullability(const ASTContext &context) const {
- QualType type(this, 0);
- do {
+Optional<NullabilityKind>
+Type::getNullability(const ASTContext &Context) const {
+ QualType Type(this, 0);
+ while (const auto *AT = Type->getAs<AttributedType>()) {
// Check whether this is an attributed type with nullability
// information.
- if (auto attributed = dyn_cast<AttributedType>(type.getTypePtr())) {
- if (auto nullability = attributed->getImmediateNullability())
- return nullability;
- }
+ if (auto Nullability = AT->getImmediateNullability())
+ return Nullability;
- // Desugar the type. If desugaring does nothing, we're done.
- QualType desugared = type.getSingleStepDesugaredType(context);
- if (desugared.getTypePtr() == type.getTypePtr())
- return None;
-
- type = desugared;
- } while (true);
+ Type = AT->getEquivalentType();
+ }
+ return None;
}
bool Type::canHaveNullability(bool ResultIfUnknown) const {
@@ -3796,6 +3775,9 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -3837,12 +3819,13 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
llvm_unreachable("bad type kind!");
}
-llvm::Optional<NullabilityKind> AttributedType::getImmediateNullability() const {
- if (getAttrKind() == AttributedType::attr_nonnull)
+llvm::Optional<NullabilityKind>
+AttributedType::getImmediateNullability() const {
+ if (getAttrKind() == attr::TypeNonNull)
return NullabilityKind::NonNull;
- if (getAttrKind() == AttributedType::attr_nullable)
+ if (getAttrKind() == attr::TypeNullable)
return NullabilityKind::Nullable;
- if (getAttrKind() == AttributedType::attr_null_unspecified)
+ if (getAttrKind() == attr::TypeNullUnspecified)
return NullabilityKind::Unspecified;
return None;
}
@@ -4032,17 +4015,26 @@ CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
}
void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
- const llvm::APSInt &Val, unsigned Scale,
- unsigned Radix) {
- llvm::APSInt ScaleVal = llvm::APSInt::getUnsigned(1ULL << Scale);
- llvm::APSInt IntPart = Val / ScaleVal;
- llvm::APSInt FractPart = Val % ScaleVal;
- llvm::APSInt RadixInt = llvm::APSInt::getUnsigned(Radix);
-
- IntPart.toString(Str, Radix);
+ llvm::APSInt Val, unsigned Scale) {
+ if (Val.isSigned() && Val.isNegative() && Val != -Val) {
+ Val = -Val;
+ Str.push_back('-');
+ }
+
+ llvm::APSInt IntPart = Val >> Scale;
+
+ // Add 4 bits of headroom so the value still fits after multiplying by 10 (the radix).
+ unsigned Width = Val.getBitWidth() + 4;
+ llvm::APInt FractPart = Val.zextOrTrunc(Scale).zext(Width);
+ llvm::APInt FractPartMask = llvm::APInt::getAllOnesValue(Scale).zext(Width);
+ llvm::APInt RadixInt = llvm::APInt(Width, 10);
+
+ IntPart.toString(Str, /*radix=*/10);
Str.push_back('.');
do {
- (FractPart * RadixInt / ScaleVal).toString(Str, Radix);
- FractPart = (FractPart * RadixInt) % ScaleVal;
- } while (FractPart.getExtValue());
+ (FractPart * RadixInt)
+ .lshr(Scale)
+ .toString(Str, /*radix=*/10, Val.isSigned());
+ FractPart = (FractPart * RadixInt) & FractPartMask;
+ } while (FractPart != 0);
}
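The new FixedPointValueToString drops the division/modulo loop in favor of a shift-and-mask decimal expansion, widening the working APInt by 4 bits so the multiply-by-10 can never overflow. A minimal standalone sketch of the same idea with plain unsigned arithmetic (this helper is illustrative only and is not part of the patch; it assumes Scale is small enough that Fract * 10 fits in unsigned, which is exactly what the 4 extra bits guarantee in the real code):

    #include <cstdio>

    // Print Val / 2^Scale in decimal: the integer part is Val >> Scale, and each
    // loop pass multiplies the remaining fractional bits by 10 and emits the bits
    // that cross the binary point.
    static void printFixedPoint(unsigned Val, unsigned Scale) {
      unsigned Mask = (1u << Scale) - 1;   // keeps only the fractional bits
      std::printf("%u.", Val >> Scale);
      unsigned Fract = Val & Mask;
      do {
        Fract *= 10;
        std::printf("%u", Fract >> Scale); // always a single digit 0-9
        Fract &= Mask;
      } while (Fract != 0);
      std::printf("\n");
    }

    int main() { printFixedPoint(89, 4); } // 89/16 == 5.5625, prints "5.5625"
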
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index e4fd6f106e33..b7b2f188d716 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -384,6 +384,9 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -404,11 +407,11 @@ TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
}
SourceLocation TypeLoc::findNullabilityLoc() const {
- if (auto attributedLoc = getAs<AttributedTypeLoc>()) {
- if (attributedLoc.getAttrKind() == AttributedType::attr_nullable ||
- attributedLoc.getAttrKind() == AttributedType::attr_nonnull ||
- attributedLoc.getAttrKind() == AttributedType::attr_null_unspecified)
- return attributedLoc.getAttrNameLoc();
+ if (auto ATL = getAs<AttributedTypeLoc>()) {
+ const Attr *A = ATL.getAttr();
+ if (A && (isa<TypeNullableAttr>(A) || isa<TypeNonNullAttr>(A) ||
+ isa<TypeNullUnspecifiedAttr>(A)))
+ return A->getLocation();
}
return {};
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index e032c312aa12..32c75afb4381 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -117,9 +117,7 @@ namespace {
void spaceBeforePlaceHolder(raw_ostream &OS);
void printTypeSpec(NamedDecl *D, raw_ostream &OS);
- void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
void printBefore(QualType T, raw_ostream &OS);
- void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
void printAfter(QualType T, raw_ostream &OS);
void AppendScope(DeclContext *DC, raw_ostream &OS);
void printTag(TagDecl *T, raw_ostream &OS);
@@ -129,6 +127,10 @@ namespace {
void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
#include "clang/AST/TypeNodes.def"
+
+ private:
+ void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
};
} // namespace
@@ -160,8 +162,15 @@ void TypePrinter::spaceBeforePlaceHolder(raw_ostream &OS) {
OS << ' ';
}
+static SplitQualType splitAccordingToPolicy(QualType QT,
+ const PrintingPolicy &Policy) {
+ if (Policy.PrintCanonicalTypes)
+ QT = QT.getCanonicalType();
+ return QT.split();
+}
+
void TypePrinter::print(QualType t, raw_ostream &OS, StringRef PlaceHolder) {
- SplitQualType split = t.split();
+ SplitQualType split = splitAccordingToPolicy(t, Policy);
print(split.Ty, split.Quals, OS, PlaceHolder);
}
@@ -260,7 +269,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
}
void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
- SplitQualType Split = T.split();
+ SplitQualType Split = splitAccordingToPolicy(T, Policy);
// If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
// at this level.
@@ -320,7 +329,7 @@ void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) {
}
void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
- SplitQualType split = t.split();
+ SplitQualType split = splitAccordingToPolicy(t, Policy);
printAfter(split.Ty, split.Quals, OS);
}
@@ -801,10 +810,8 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
printFunctionAfter(Info, OS);
- if (unsigned quals = T->getTypeQuals()) {
- OS << ' ';
- AppendTypeQualList(OS, quals, Policy.Restrict);
- }
+ if (!T->getTypeQuals().empty())
+ OS << " " << T->getTypeQuals().getAsString();
switch (T->getRefQualifier()) {
case RQ_None:
@@ -861,6 +868,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_AAPCS_VFP:
OS << " __attribute__((pcs(\"aapcs-vfp\")))";
break;
+ case CC_AArch64VectorCall:
+ OS << "__attribute__((aarch64_vector_pcs))";
+ break;
case CC_IntelOclBicc:
OS << " __attribute__((intel_ocl_bicc))";
break;
@@ -1154,9 +1164,13 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
D->getLocation());
if (PLoc.isValid()) {
- OS << " at " << PLoc.getFilename()
- << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
+ OS << " at ";
+ StringRef File = PLoc.getFilename();
+ if (Policy.RemapFilePaths)
+ OS << Policy.remapPath(File);
+ else
+ OS << File;
+ OS << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
}
}
@@ -1354,12 +1368,14 @@ void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
void TypePrinter::printAttributedBefore(const AttributedType *T,
raw_ostream &OS) {
+ // FIXME: Generate this with TableGen.
+
// Prefer the macro forms of the GC and ownership qualifiers.
- if (T->getAttrKind() == AttributedType::attr_objc_gc ||
- T->getAttrKind() == AttributedType::attr_objc_ownership)
+ if (T->getAttrKind() == attr::ObjCGC ||
+ T->getAttrKind() == attr::ObjCOwnership)
return printBefore(T->getEquivalentType(), OS);
- if (T->getAttrKind() == AttributedType::attr_objc_kindof)
+ if (T->getAttrKind() == attr::ObjCKindOf)
OS << "__kindof ";
printBefore(T->getModifiedType(), OS);
@@ -1367,23 +1383,21 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
if (T->isMSTypeSpec()) {
switch (T->getAttrKind()) {
default: return;
- case AttributedType::attr_ptr32: OS << " __ptr32"; break;
- case AttributedType::attr_ptr64: OS << " __ptr64"; break;
- case AttributedType::attr_sptr: OS << " __sptr"; break;
- case AttributedType::attr_uptr: OS << " __uptr"; break;
+ case attr::Ptr32: OS << " __ptr32"; break;
+ case attr::Ptr64: OS << " __ptr64"; break;
+ case attr::SPtr: OS << " __sptr"; break;
+ case attr::UPtr: OS << " __uptr"; break;
}
spaceBeforePlaceHolder(OS);
}
// Print nullability type specifiers.
- if (T->getAttrKind() == AttributedType::attr_nonnull ||
- T->getAttrKind() == AttributedType::attr_nullable ||
- T->getAttrKind() == AttributedType::attr_null_unspecified) {
- if (T->getAttrKind() == AttributedType::attr_nonnull)
+ if (T->getImmediateNullability()) {
+ if (T->getAttrKind() == attr::TypeNonNull)
OS << " _Nonnull";
- else if (T->getAttrKind() == AttributedType::attr_nullable)
+ else if (T->getAttrKind() == attr::TypeNullable)
OS << " _Nullable";
- else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
+ else if (T->getAttrKind() == attr::TypeNullUnspecified)
OS << " _Null_unspecified";
else
llvm_unreachable("unhandled nullability");
@@ -1393,140 +1407,96 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
void TypePrinter::printAttributedAfter(const AttributedType *T,
raw_ostream &OS) {
+ // FIXME: Generate this with TableGen.
+
// Prefer the macro forms of the GC and ownership qualifiers.
- if (T->getAttrKind() == AttributedType::attr_objc_gc ||
- T->getAttrKind() == AttributedType::attr_objc_ownership)
+ if (T->getAttrKind() == attr::ObjCGC ||
+ T->getAttrKind() == attr::ObjCOwnership)
return printAfter(T->getEquivalentType(), OS);
- if (T->getAttrKind() == AttributedType::attr_objc_kindof)
- return;
-
- // TODO: not all attributes are GCC-style attributes.
- if (T->isMSTypeSpec())
- return;
-
- // Nothing to print after.
- if (T->getAttrKind() == AttributedType::attr_nonnull ||
- T->getAttrKind() == AttributedType::attr_nullable ||
- T->getAttrKind() == AttributedType::attr_null_unspecified)
- return printAfter(T->getModifiedType(), OS);
-
// If this is a calling convention attribute, don't print the implicit CC from
// the modified type.
SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv());
printAfter(T->getModifiedType(), OS);
+ // Some attributes are printed as qualifiers before the type, so we have
+ // nothing left to do.
+ if (T->getAttrKind() == attr::ObjCKindOf ||
+ T->isMSTypeSpec() || T->getImmediateNullability())
+ return;
+
// Don't print the inert __unsafe_unretained attribute at all.
- if (T->getAttrKind() == AttributedType::attr_objc_inert_unsafe_unretained)
+ if (T->getAttrKind() == attr::ObjCInertUnsafeUnretained)
return;
// Don't print ns_returns_retained unless it had an effect.
- if (T->getAttrKind() == AttributedType::attr_ns_returns_retained &&
+ if (T->getAttrKind() == attr::NSReturnsRetained &&
!T->getEquivalentType()->castAs<FunctionType>()
->getExtInfo().getProducesResult())
return;
- // Print nullability type specifiers that occur after
- if (T->getAttrKind() == AttributedType::attr_nonnull ||
- T->getAttrKind() == AttributedType::attr_nullable ||
- T->getAttrKind() == AttributedType::attr_null_unspecified) {
- if (T->getAttrKind() == AttributedType::attr_nonnull)
- OS << " _Nonnull";
- else if (T->getAttrKind() == AttributedType::attr_nullable)
- OS << " _Nullable";
- else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
- OS << " _Null_unspecified";
- else
- llvm_unreachable("unhandled nullability");
-
+ if (T->getAttrKind() == attr::LifetimeBound) {
+ OS << " [[clang::lifetimebound]]";
return;
}
- if (T->getAttrKind() == AttributedType::attr_lifetimebound) {
- OS << " [[clang::lifetimebound]]";
+ // The address_space attribute is still stored in the qualifiers and gets
+ // printed with them, so return early to avoid printing it twice.
+ if (T->getAttrKind() == attr::AddressSpace)
return;
- }
OS << " __attribute__((";
switch (T->getAttrKind()) {
- case AttributedType::attr_lifetimebound:
- case AttributedType::attr_nonnull:
- case AttributedType::attr_nullable:
- case AttributedType::attr_null_unspecified:
- case AttributedType::attr_objc_gc:
- case AttributedType::attr_objc_inert_unsafe_unretained:
- case AttributedType::attr_objc_kindof:
- case AttributedType::attr_objc_ownership:
- case AttributedType::attr_ptr32:
- case AttributedType::attr_ptr64:
- case AttributedType::attr_sptr:
- case AttributedType::attr_uptr:
- llvm_unreachable("This attribute should have been handled already");
-
- case AttributedType::attr_address_space:
- OS << "address_space(";
- // FIXME: printing the raw LangAS value is wrong. This should probably
- // use the same code as Qualifiers::print()
- OS << (unsigned)T->getEquivalentType().getAddressSpace();
- OS << ')';
- break;
-
- case AttributedType::attr_vector_size:
- OS << "__vector_size__(";
- if (const auto *vector = T->getEquivalentType()->getAs<VectorType>()) {
- OS << vector->getNumElements();
- OS << " * sizeof(";
- print(vector->getElementType(), OS, StringRef());
- OS << ')';
- }
- OS << ')';
- break;
-
- case AttributedType::attr_neon_vector_type:
- case AttributedType::attr_neon_polyvector_type: {
- if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
- OS << "neon_vector_type(";
- else
- OS << "neon_polyvector_type(";
- const auto *vector = T->getEquivalentType()->getAs<VectorType>();
- OS << vector->getNumElements();
- OS << ')';
+#define TYPE_ATTR(NAME)
+#define DECL_OR_TYPE_ATTR(NAME)
+#define ATTR(NAME) case attr::NAME:
+#include "clang/Basic/AttrList.inc"
+ llvm_unreachable("non-type attribute attached to type");
+
+ case attr::OpenCLPrivateAddressSpace:
+ case attr::OpenCLGlobalAddressSpace:
+ case attr::OpenCLLocalAddressSpace:
+ case attr::OpenCLConstantAddressSpace:
+ case attr::OpenCLGenericAddressSpace:
+ // FIXME: Update printAttributedBefore to print these once we generate
+ // AttributedType nodes for them.
break;
- }
- case AttributedType::attr_regparm: {
- // FIXME: When Sema learns to form this AttributedType, avoid printing the
- // attribute again in printFunctionProtoAfter.
- OS << "regparm(";
- QualType t = T->getEquivalentType();
- while (!t->isFunctionType())
- t = t->getPointeeType();
- OS << t->getAs<FunctionType>()->getRegParmType();
- OS << ')';
- break;
- }
+ case attr::LifetimeBound:
+ case attr::TypeNonNull:
+ case attr::TypeNullable:
+ case attr::TypeNullUnspecified:
+ case attr::ObjCGC:
+ case attr::ObjCInertUnsafeUnretained:
+ case attr::ObjCKindOf:
+ case attr::ObjCOwnership:
+ case attr::Ptr32:
+ case attr::Ptr64:
+ case attr::SPtr:
+ case attr::UPtr:
+ case attr::AddressSpace:
+ llvm_unreachable("This attribute should have been handled already");
- case AttributedType::attr_ns_returns_retained:
+ case attr::NSReturnsRetained:
OS << "ns_returns_retained";
break;
// FIXME: When Sema learns to form this AttributedType, avoid printing the
// attribute again in printFunctionProtoAfter.
- case AttributedType::attr_noreturn: OS << "noreturn"; break;
- case AttributedType::attr_nocf_check: OS << "nocf_check"; break;
- case AttributedType::attr_cdecl: OS << "cdecl"; break;
- case AttributedType::attr_fastcall: OS << "fastcall"; break;
- case AttributedType::attr_stdcall: OS << "stdcall"; break;
- case AttributedType::attr_thiscall: OS << "thiscall"; break;
- case AttributedType::attr_swiftcall: OS << "swiftcall"; break;
- case AttributedType::attr_vectorcall: OS << "vectorcall"; break;
- case AttributedType::attr_pascal: OS << "pascal"; break;
- case AttributedType::attr_ms_abi: OS << "ms_abi"; break;
- case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break;
- case AttributedType::attr_regcall: OS << "regcall"; break;
- case AttributedType::attr_pcs:
- case AttributedType::attr_pcs_vfp: {
+ case attr::AnyX86NoCfCheck: OS << "nocf_check"; break;
+ case attr::CDecl: OS << "cdecl"; break;
+ case attr::FastCall: OS << "fastcall"; break;
+ case attr::StdCall: OS << "stdcall"; break;
+ case attr::ThisCall: OS << "thiscall"; break;
+ case attr::SwiftCall: OS << "swiftcall"; break;
+ case attr::VectorCall: OS << "vectorcall"; break;
+ case attr::Pascal: OS << "pascal"; break;
+ case attr::MSABI: OS << "ms_abi"; break;
+ case attr::SysVABI: OS << "sysv_abi"; break;
+ case attr::RegCall: OS << "regcall"; break;
+ case attr::Pcs: {
OS << "pcs(";
QualType t = T->getEquivalentType();
while (!t->isFunctionType())
@@ -1536,15 +1506,18 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
OS << ')';
break;
}
-
- case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break;
- case AttributedType::attr_preserve_most:
+ case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
+ case attr::IntelOclBicc: OS << "inteloclbicc"; break;
+ case attr::PreserveMost:
OS << "preserve_most";
break;
- case AttributedType::attr_preserve_all:
+ case attr::PreserveAll:
OS << "preserve_all";
break;
+ case attr::NoDeref:
+ OS << "noderef";
+ break;
}
OS << "))";
}
@@ -1851,6 +1824,12 @@ std::string QualType::getAsString(const Type *ty, Qualifiers qs,
return buffer;
}
+void QualType::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ const Twine &PlaceHolder, unsigned Indentation) const {
+ print(splitAccordingToPolicy(*this, Policy), OS, Policy, PlaceHolder,
+ Indentation);
+}
+
void QualType::print(const Type *ty, Qualifiers qs,
raw_ostream &OS, const PrintingPolicy &policy,
const Twine &PlaceHolder, unsigned Indentation) {
@@ -1860,6 +1839,12 @@ void QualType::print(const Type *ty, Qualifiers qs,
TypePrinter(policy, Indentation).print(ty, qs, OS, PH);
}
+void QualType::getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const {
+ return getAsStringInternal(splitAccordingToPolicy(*this, Policy), Str,
+ Policy);
+}
+
void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
std::string &buffer,
const PrintingPolicy &policy) {
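The splitAccordingToPolicy() hook threaded through print(), printBefore()/printAfter(), and the new QualType overloads above is what makes PrintingPolicy::PrintCanonicalTypes take effect everywhere a type is rendered. A rough usage sketch (the helper and the "size_type" example are illustrative, not part of the patch):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/PrettyPrinter.h"
    #include "clang/AST/Type.h"
    #include <string>

    // Print one QualType both as written and desugared, e.g. a typedef
    // "size_type" vs its canonical "unsigned long".
    std::string printBothForms(clang::QualType QT, const clang::ASTContext &Ctx) {
      clang::PrintingPolicy Sugared = Ctx.getPrintingPolicy();
      clang::PrintingPolicy Canonical = Sugared;
      Canonical.PrintCanonicalTypes = true; // the bit splitAccordingToPolicy checks
      return QT.getAsString(Sugared) + " vs " + QT.getAsString(Canonical);
    }
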
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index dfc5774ab498..846a6085743e 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -2105,8 +2105,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
const CXXMethodDecl *MD = I.second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
- llvm::sort(ThunksVector.begin(), ThunksVector.end(),
- [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ llvm::sort(ThunksVector, [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
assert(LHS.Method == nullptr && RHS.Method == nullptr);
return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
});
@@ -2206,13 +2205,12 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
else
this->VTableIndices = OwningArrayRef<size_t>(VTableIndices);
- llvm::sort(this->VTableThunks.begin(), this->VTableThunks.end(),
- [](const VTableLayout::VTableThunkTy &LHS,
- const VTableLayout::VTableThunkTy &RHS) {
- assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
- "Different thunks should have unique indices!");
- return LHS.first < RHS.first;
- });
+ llvm::sort(this->VTableThunks, [](const VTableLayout::VTableThunkTy &LHS,
+ const VTableLayout::VTableThunkTy &RHS) {
+ assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+ "Different thunks should have unique indices!");
+ return LHS.first < RHS.first;
+ });
}
VTableLayout::~VTableLayout() { }
@@ -3345,8 +3343,7 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
PathsSorted.reserve(Paths.size());
for (auto& P : Paths)
PathsSorted.push_back(*P);
- llvm::sort(PathsSorted.begin(), PathsSorted.end(),
- [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
+ llvm::sort(PathsSorted, [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
return LHS.MangledPath < RHS.MangledPath;
});
bool Changed = false;
@@ -3409,10 +3406,9 @@ static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) {
for (const FullPathTy &OtherPath : FullPaths) {
if (&SpecificPath == &OtherPath)
continue;
- if (std::all_of(SpecificPath.begin(), SpecificPath.end(),
- [&](const BaseSubobject &BSO) {
- return OtherPath.count(BSO) != 0;
- })) {
+ if (llvm::all_of(SpecificPath, [&](const BaseSubobject &BSO) {
+ return OtherPath.count(BSO) != 0;
+ })) {
return true;
}
}
@@ -3488,10 +3484,9 @@ static const FullPathTy *selectBestPath(ASTContext &Context,
// It's possible that the overrider isn't in this path. If so, skip it
// because this path didn't introduce it.
const CXXRecordDecl *OverridingParent = OverridingMethod->getParent();
- if (std::none_of(SpecificPath.begin(), SpecificPath.end(),
- [&](const BaseSubobject &BSO) {
- return BSO.getBase() == OverridingParent;
- }))
+ if (llvm::none_of(SpecificPath, [&](const BaseSubobject &BSO) {
+ return BSO.getBase() == OverridingParent;
+ }))
continue;
CurrentOverrides.insert(OverridingMethod);
}
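The VTableBuilder hunks, like several of the matcher and analysis hunks below, are mechanical migrations from iterator-pair STL calls to the range-based wrappers in llvm/ADT/STLExtras.h. A minimal illustration of the pattern, with made-up data:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"

    bool demo() {
      llvm::SmallVector<int, 4> V = {3, 1, 2};
      llvm::sort(V);                                        // was llvm::sort(V.begin(), V.end(), ...)
      return llvm::all_of(V, [](int X) { return X > 0; });  // was std::all_of(V.begin(), V.end(), ...)
    }
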
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index 63f8395b8277..dec2e2ad1f93 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -635,10 +635,6 @@ private:
bool memoizedMatchesAncestorOfRecursively(
const ast_type_traits::DynTypedNode &Node, const DynTypedMatcher &Matcher,
BoundNodesTreeBuilder *Builder, AncestorMatchMode MatchMode) {
- if (Node.get<TranslationUnitDecl>() ==
- ActiveASTContext->getTranslationUnitDecl())
- return false;
-
// For AST-nodes that don't have an identity, we can't memoize.
if (!Builder->isComparable())
return matchesAncestorOfRecursively(Node, Matcher, Builder, MatchMode);
@@ -673,7 +669,26 @@ private:
BoundNodesTreeBuilder *Builder,
AncestorMatchMode MatchMode) {
const auto &Parents = ActiveASTContext->getParents(Node);
- assert(!Parents.empty() && "Found node that is not in the parent map.");
+ if (Parents.empty()) {
+ // Nodes may have no parents if:
+ // a) the node is the TranslationUnitDecl
+ // b) we have a limited traversal scope that excludes the parent edges
+ // c) there is a bug in the AST, and the node is not reachable
+ // Usually the traversal scope is the whole AST, which precludes b.
+ // Bugs are common enough that it's worthwhile asserting when we can.
+#ifndef NDEBUG
+ if (!Node.get<TranslationUnitDecl>() &&
+ /* Traversal scope is full AST if any of the bounds are the TU */
+ llvm::any_of(ActiveASTContext->getTraversalScope(), [](Decl *D) {
+ return D->getKind() == Decl::TranslationUnit;
+ })) {
+ llvm::errs() << "Tried to match orphan node:\n";
+ Node.dump(llvm::errs(), ActiveASTContext->getSourceManager());
+ llvm_unreachable("Parent map should be complete!");
+ }
+#endif
+ return false;
+ }
if (Parents.size() == 1) {
// Only one parent - do recursive memoization.
const ast_type_traits::DynTypedNode Parent = Parents[0];
@@ -1019,7 +1034,7 @@ void MatchFinder::matchAST(ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
Visitor.set_active_ast_context(&Context);
Visitor.onStartOfTranslationUnit();
- Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ Visitor.TraverseAST(Context);
Visitor.onEndOfTranslationUnit();
}
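With matchAST() dispatching through TraverseAST(Context), matching honors whatever traversal scope is set on the ASTContext instead of always starting at the TranslationUnitDecl, and ancestor matching on a node outside that scope now simply fails rather than hitting the old assertion. A rough driver sketch (the callback and the matcher are placeholders):

    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/ASTMatchers/ASTMatchers.h"

    using namespace clang;
    using namespace clang::ast_matchers;

    // Run a finder over the AST; any restricted traversal scope on Context is
    // respected because matchAST() now goes through TraverseAST().
    void runFinder(ASTContext &Context, MatchFinder::MatchCallback &CB) {
      MatchFinder Finder;
      Finder.addMatcher(functionDecl(isDefinition()).bind("fn"), &CB);
      Finder.matchAST(Context);
    }
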
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 9cea2f5efc5b..e1aae172a8d6 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -144,10 +144,10 @@ DynTypedMatcher DynTypedMatcher::constructVariadic(
ast_type_traits::ASTNodeKind SupportedKind,
std::vector<DynTypedMatcher> InnerMatchers) {
assert(!InnerMatchers.empty() && "Array must not be empty.");
- assert(std::all_of(InnerMatchers.begin(), InnerMatchers.end(),
- [SupportedKind](const DynTypedMatcher &M) {
- return M.canConvertTo(SupportedKind);
- }) &&
+ assert(llvm::all_of(InnerMatchers,
+ [SupportedKind](const DynTypedMatcher &M) {
+ return M.canConvertTo(SupportedKind);
+ }) &&
"InnerMatchers must be convertible to SupportedKind!");
// We must relax the restrict kind here.
@@ -449,7 +449,7 @@ bool HasNameMatcher::matchesNodeUnqualified(const NamedDecl &Node) const {
assert(UseUnqualifiedMatch);
llvm::SmallString<128> Scratch;
StringRef NodeName = getNodeName(Node, Scratch);
- return std::any_of(Names.begin(), Names.end(), [&](StringRef Name) {
+ return llvm::any_of(Names, [&](StringRef Name) {
return consumeNameSuffix(Name, NodeName) && Name.empty();
});
}
@@ -573,6 +573,9 @@ const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
const internal::VariadicDynCastAllOfMatcher<Decl,
ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
+const internal::VariadicDynCastAllOfMatcher<
+ Decl, ClassTemplatePartialSpecializationDecl>
+ classTemplatePartialSpecializationDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl;
@@ -603,6 +606,8 @@ const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
+ indirectFieldDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
@@ -610,6 +615,10 @@ const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
const internal::VariadicAllOfMatcher<Stmt> stmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
+ unresolvedMemberExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr>
+ cxxDependentScopeMemberExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
@@ -659,6 +668,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
@@ -680,6 +690,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
@@ -710,6 +721,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
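The newly exported matcher variables above (blockExpr, constantExpr, imaginaryLiteral, unresolvedMemberExpr, cxxDependentScopeMemberExpr, indirectFieldDecl, classTemplatePartialSpecializationDecl) behave like any other node matcher. Illustrative use only, collecting every imaginary literal (e.g. "2.0i") in a translation unit:

    #include "clang/ASTMatchers/ASTMatchFinder.h"
    #include "clang/ASTMatchers/ASTMatchers.h"

    using namespace clang;
    using namespace clang::ast_matchers;

    void dumpImaginaryLiterals(ASTContext &Context) {
      for (const BoundNodes &N : match(imaginaryLiteral().bind("imag"), Context))
        if (const auto *IL = N.getNodeAs<ImaginaryLiteral>("imag"))
          IL->dump();
    }
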
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index da8df907ba7f..5db10048fdf8 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -110,6 +110,10 @@ private:
}
switch (Code[0]) {
+ case '#':
+ Result.Kind = TokenInfo::TK_Eof;
+ Result.Text = "";
+ return Result;
case ',':
Result.Kind = TokenInfo::TK_Comma;
Result.Text = Code.substr(0, 1);
@@ -339,8 +343,27 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
if (const VariantValue NamedValue =
NamedValues ? NamedValues->lookup(NameToken.Text)
: VariantValue()) {
- *Value = NamedValue;
- return true;
+
+ if (Tokenizer->nextTokenKind() != TokenInfo::TK_Period) {
+ *Value = NamedValue;
+ return true;
+ }
+
+ std::string BindID;
+ if (!parseBindID(BindID))
+ return false;
+
+ assert(NamedValue.isMatcher());
+ llvm::Optional<DynTypedMatcher> Result =
+ NamedValue.getMatcher().getSingleMatcher();
+ if (Result.hasValue()) {
+ llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
+ if (Bound.hasValue()) {
+ *Value = VariantMatcher::SingleMatcher(*Bound);
+ return true;
+ }
+ }
+ return false;
}
// If the syntax is correct and the name is not a matcher either, report
// unknown named value.
@@ -359,6 +382,43 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
return parseMatcherExpressionImpl(NameToken, Value);
}
+bool Parser::parseBindID(std::string &BindID) {
+ // Parse .bind("foo")
+ assert(Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period);
+ Tokenizer->consumeNextToken(); // consume the period.
+ const TokenInfo BindToken = Tokenizer->consumeNextToken();
+ if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
+ addCompletion(BindToken, MatcherCompletion("bind(\"", "bind", 1));
+ return false;
+ }
+
+ const TokenInfo OpenToken = Tokenizer->consumeNextToken();
+ const TokenInfo IDToken = Tokenizer->consumeNextToken();
+ const TokenInfo CloseToken = Tokenizer->consumeNextToken();
+
+ // TODO: We could use different error codes for each/some to be more
+ // explicit about the syntax error.
+ if (BindToken.Kind != TokenInfo::TK_Ident ||
+ BindToken.Text != TokenInfo::ID_Bind) {
+ Error->addError(BindToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
+ Error->addError(OpenToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (IDToken.Kind != TokenInfo::TK_Literal || !IDToken.Value.isString()) {
+ Error->addError(IDToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ if (CloseToken.Kind != TokenInfo::TK_CloseParen) {
+ Error->addError(CloseToken.Range, Error->ET_ParserMalformedBindExpr);
+ return false;
+ }
+ BindID = IDToken.Value.getString();
+ return true;
+}
+
/// Parse and validate a matcher expression.
/// \return \c true on success, in which case \c Value has the matcher parsed.
/// If the input is malformed, or some argument has an error, it
@@ -425,38 +485,8 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
std::string BindID;
if (Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period) {
- // Parse .bind("foo")
- Tokenizer->consumeNextToken(); // consume the period.
- const TokenInfo BindToken = Tokenizer->consumeNextToken();
- if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
- addCompletion(BindToken, MatcherCompletion("bind(\"", "bind", 1));
+ if (!parseBindID(BindID))
return false;
- }
-
- const TokenInfo OpenToken = Tokenizer->consumeNextToken();
- const TokenInfo IDToken = Tokenizer->consumeNextToken();
- const TokenInfo CloseToken = Tokenizer->consumeNextToken();
-
- // TODO: We could use different error codes for each/some to be more
- // explicit about the syntax error.
- if (BindToken.Kind != TokenInfo::TK_Ident ||
- BindToken.Text != TokenInfo::ID_Bind) {
- Error->addError(BindToken.Range, Error->ET_ParserMalformedBindExpr);
- return false;
- }
- if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
- Error->addError(OpenToken.Range, Error->ET_ParserMalformedBindExpr);
- return false;
- }
- if (IDToken.Kind != TokenInfo::TK_Literal || !IDToken.Value.isString()) {
- Error->addError(IDToken.Range, Error->ET_ParserMalformedBindExpr);
- return false;
- }
- if (CloseToken.Kind != TokenInfo::TK_CloseParen) {
- Error->addError(CloseToken.Range, Error->ET_ParserMalformedBindExpr);
- return false;
- }
- BindID = IDToken.Value.getString();
}
if (!Ctor)
@@ -619,12 +649,12 @@ Parser::completeExpression(StringRef Code, unsigned CompletionOffset, Sema *S,
P.parseExpressionImpl(&Dummy);
// Sort by specificity, then by name.
- llvm::sort(P.Completions.begin(), P.Completions.end(),
+ llvm::sort(P.Completions,
[](const MatcherCompletion &A, const MatcherCompletion &B) {
- if (A.Specificity != B.Specificity)
- return A.Specificity > B.Specificity;
- return A.TypedText < B.TypedText;
- });
+ if (A.Specificity != B.Specificity)
+ return A.Specificity > B.Specificity;
+ return A.TypedText < B.TypedText;
+ });
return P.Completions;
}
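Factoring parseBindID() out of parseMatcherExpressionImpl() lets a value looked up through NamedValues take a trailing .bind(...) as well (e.g. binding a clang-query `let`-defined matcher), and the new '#' token ends the input so trailing comments are ignored. A rough sketch against the dynamic parser entry point; the overloads are as declared in Dynamic/Parser.h and the matcher string is arbitrary:

    #include "clang/ASTMatchers/Dynamic/Diagnostics.h"
    #include "clang/ASTMatchers/Dynamic/Parser.h"

    using namespace clang::ast_matchers::dynamic;

    // Both the trailing .bind("...") and the '#' comment are accepted now.
    bool parsesWithBindAndComment() {
      Diagnostics Err;
      return Parser::parseMatcherExpression(
                 "functionDecl(isDefinition()).bind(\"def\") # keep definitions",
                 &Err)
          .hasValue();
    }
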
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index 4d2d76f6a75b..e6e48467967e 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -106,6 +106,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_OVERLOADED_2(callee);
REGISTER_OVERLOADED_2(hasPrefix);
REGISTER_OVERLOADED_2(hasType);
+ REGISTER_OVERLOADED_2(ignoringParens);
REGISTER_OVERLOADED_2(isDerivedFrom);
REGISTER_OVERLOADED_2(isSameOrDerivedFrom);
REGISTER_OVERLOADED_2(loc);
@@ -133,11 +134,12 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(asString);
REGISTER_MATCHER(atomicExpr);
REGISTER_MATCHER(atomicType);
- REGISTER_MATCHER(autoType);
REGISTER_MATCHER(autoreleasePoolStmt)
- REGISTER_MATCHER(binaryOperator);
+ REGISTER_MATCHER(autoType);
REGISTER_MATCHER(binaryConditionalOperator);
+ REGISTER_MATCHER(binaryOperator);
REGISTER_MATCHER(blockDecl);
+ REGISTER_MATCHER(blockExpr);
REGISTER_MATCHER(blockPointerType);
REGISTER_MATCHER(booleanType);
REGISTER_MATCHER(breakStmt);
@@ -153,6 +155,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
+ REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
REGISTER_MATCHER(cStyleCastExpr);
@@ -167,6 +170,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxCtorInitializer);
REGISTER_MATCHER(cxxDefaultArgExpr);
REGISTER_MATCHER(cxxDeleteExpr);
+ REGISTER_MATCHER(cxxDependentScopeMemberExpr);
REGISTER_MATCHER(cxxDestructorDecl);
REGISTER_MATCHER(cxxDynamicCastExpr);
REGISTER_MATCHER(cxxForRangeStmt);
@@ -188,10 +192,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(decayedType);
REGISTER_MATCHER(decl);
REGISTER_MATCHER(declaratorDecl);
- REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(declCountIs);
REGISTER_MATCHER(declRefExpr);
REGISTER_MATCHER(declStmt);
+ REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentSizedArrayType);
REGISTER_MATCHER(designatedInitExpr);
@@ -208,6 +212,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(expr);
REGISTER_MATCHER(exprWithCleanups);
REGISTER_MATCHER(fieldDecl);
+ REGISTER_MATCHER(indirectFieldDecl);
REGISTER_MATCHER(floatLiteral);
REGISTER_MATCHER(forEach);
REGISTER_MATCHER(forEachArgumentWithParam);
@@ -232,6 +237,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
REGISTER_MATCHER(hasAnyParameter);
+ REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasAnySubstatement);
REGISTER_MATCHER(hasAnyTemplateArgument);
REGISTER_MATCHER(hasAnyUsingShadowDecl);
@@ -267,6 +273,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasInClassInitializer);
REGISTER_MATCHER(hasIncrement);
REGISTER_MATCHER(hasIndex);
+ REGISTER_MATCHER(hasInit);
REGISTER_MATCHER(hasInitializer);
REGISTER_MATCHER(hasKeywordSelector);
REGISTER_MATCHER(hasLHS);
@@ -290,11 +297,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasReturnValue);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSelector);
- REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasSingleDecl);
REGISTER_MATCHER(hasSize);
REGISTER_MATCHER(hasSizeExpr);
REGISTER_MATCHER(hasSourceExpression);
+ REGISTER_MATCHER(hasSpecializedTemplate);
REGISTER_MATCHER(hasStaticStorageDuration);
REGISTER_MATCHER(hasSyntacticForm);
REGISTER_MATCHER(hasTargetDecl);
@@ -307,14 +314,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasUnaryOperand);
REGISTER_MATCHER(hasUnarySelector);
REGISTER_MATCHER(hasUnderlyingDecl);
+ REGISTER_MATCHER(hasUnderlyingType);
REGISTER_MATCHER(hasUnqualifiedDesugaredType);
REGISTER_MATCHER(hasValueType);
REGISTER_MATCHER(ifStmt);
- REGISTER_MATCHER(ignoringImplicit);
REGISTER_MATCHER(ignoringImpCasts);
+ REGISTER_MATCHER(ignoringImplicit);
REGISTER_MATCHER(ignoringParenCasts);
REGISTER_MATCHER(ignoringParenImpCasts);
- REGISTER_MATCHER(ignoringParens);
+ REGISTER_MATCHER(imaginaryLiteral);
REGISTER_MATCHER(implicitCastExpr);
REGISTER_MATCHER(implicitValueInitExpr);
REGISTER_MATCHER(incompleteArrayType);
@@ -341,19 +349,21 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isDefaulted);
REGISTER_MATCHER(isDefinition);
REGISTER_MATCHER(isDeleted);
+ REGISTER_MATCHER(isDelegatingConstructor);
REGISTER_MATCHER(isExceptionVariable);
+ REGISTER_MATCHER(isExpansionInFileMatching);
+ REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isExpansionInSystemHeader);
REGISTER_MATCHER(isExplicit);
REGISTER_MATCHER(isExplicitTemplateSpecialization);
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
- REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isImplicit);
- REGISTER_MATCHER(isExpansionInFileMatching);
- REGISTER_MATCHER(isExpansionInMainFile);
+ REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isInstanceMessage);
REGISTER_MATCHER(isInstantiated);
- REGISTER_MATCHER(isExpansionInSystemHeader);
+ REGISTER_MATCHER(isInstantiationDependent);
REGISTER_MATCHER(isInteger);
REGISTER_MATCHER(isIntegral);
REGISTER_MATCHER(isInTemplateInstantiation);
@@ -372,11 +382,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isPure);
REGISTER_MATCHER(isScoped);
REGISTER_MATCHER(isSignedInteger);
+ REGISTER_MATCHER(isStaticLocal);
REGISTER_MATCHER(isStaticStorageClass);
REGISTER_MATCHER(isStruct);
REGISTER_MATCHER(isTemplateInstantiation);
+ REGISTER_MATCHER(isTypeDependent);
REGISTER_MATCHER(isUnion);
REGISTER_MATCHER(isUnsignedInteger);
+ REGISTER_MATCHER(isUserProvided);
+ REGISTER_MATCHER(isValueDependent);
REGISTER_MATCHER(isVariadic);
REGISTER_MATCHER(isVirtual);
REGISTER_MATCHER(isVirtualAsWritten);
@@ -399,10 +413,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(namesType);
REGISTER_MATCHER(nestedNameSpecifier);
REGISTER_MATCHER(nestedNameSpecifierLoc);
+ REGISTER_MATCHER(nonTypeTemplateParmDecl);
REGISTER_MATCHER(nullPointerConstant);
REGISTER_MATCHER(nullStmt);
REGISTER_MATCHER(numSelectorArgs);
- REGISTER_MATCHER(ofClass);
REGISTER_MATCHER(objcCatchStmt);
REGISTER_MATCHER(objcCategoryDecl);
REGISTER_MATCHER(objcCategoryImplDecl);
@@ -418,6 +432,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcProtocolDecl);
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
+ REGISTER_MATCHER(ofClass);
REGISTER_MATCHER(on);
REGISTER_MATCHER(onImplicitObjectArgument);
REGISTER_MATCHER(opaqueValueExpr);
@@ -437,6 +452,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(refersToDeclaration);
REGISTER_MATCHER(refersToIntegralType);
REGISTER_MATCHER(refersToType);
+ REGISTER_MATCHER(refersToTemplate);
REGISTER_MATCHER(requiresZeroInitialization);
REGISTER_MATCHER(returns);
REGISTER_MATCHER(returnStmt);
@@ -454,9 +470,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(substTemplateTypeParmType);
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
+ REGISTER_MATCHER(tagType);
REGISTER_MATCHER(templateArgument);
- REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateArgumentCountIs);
+ REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateSpecializationType);
REGISTER_MATCHER(templateTypeParmDecl);
REGISTER_MATCHER(templateTypeParmType);
@@ -464,20 +481,22 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(to);
REGISTER_MATCHER(translationUnitDecl);
REGISTER_MATCHER(type);
+ REGISTER_MATCHER(typeAliasDecl);
+ REGISTER_MATCHER(typeAliasTemplateDecl);
REGISTER_MATCHER(typedefDecl);
REGISTER_MATCHER(typedefNameDecl);
REGISTER_MATCHER(typedefType);
- REGISTER_MATCHER(typeAliasDecl);
- REGISTER_MATCHER(typeAliasTemplateDecl);
REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
REGISTER_MATCHER(unaryTransformType);
REGISTER_MATCHER(unless);
REGISTER_MATCHER(unresolvedLookupExpr);
+ REGISTER_MATCHER(unresolvedMemberExpr);
REGISTER_MATCHER(unresolvedUsingTypenameDecl);
REGISTER_MATCHER(unresolvedUsingValueDecl);
REGISTER_MATCHER(userDefinedLiteral);
+ REGISTER_MATCHER(usesADL);
REGISTER_MATCHER(usingDecl);
REGISTER_MATCHER(usingDirectiveDecl);
REGISTER_MATCHER(valueDecl);
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 9557f68452ff..30160bc239ae 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -27,7 +27,6 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
-#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
#include "clang/Analysis/BodyFarm.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
@@ -137,7 +136,7 @@ bool AnalysisDeclContext::isBodyAutosynthesized() const {
bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
bool Tmp;
Stmt *Body = getBody(Tmp);
- return Tmp && Body->getLocStart().isValid();
+ return Tmp && Body->getBeginLoc().isValid();
}
/// Returns true if \param VD is an Objective-C implicit 'self' parameter.
@@ -292,12 +291,6 @@ ParentMap &AnalysisDeclContext::getParentMap() {
return *PM;
}
-PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
- if (!PCA)
- PCA.reset(new PseudoConstantAnalysis(getBody()));
- return PCA.get();
-}
-
AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
@@ -392,7 +385,7 @@ LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!L) {
- L = new LOC(ctx, parent, d);
+ L = new LOC(ctx, parent, d, ++NewID);
Contexts.InsertNode(L, InsertPos);
}
return L;
@@ -409,7 +402,7 @@ LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
auto *L =
cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!L) {
- L = new StackFrameContext(ctx, parent, s, blk, idx);
+ L = new StackFrameContext(ctx, parent, s, blk, idx, ++NewID);
Contexts.InsertNode(L, InsertPos);
}
return L;
@@ -434,7 +427,7 @@ LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
InsertPos));
if (!L) {
- L = new BlockInvocationContext(ctx, parent, BD, ContextData);
+ L = new BlockInvocationContext(ctx, parent, BD, ContextData, ++NewID);
Contexts.InsertNode(L, InsertPos);
}
return L;
@@ -500,7 +493,7 @@ void LocationContext::dumpStack(
OS << "Calling anonymous code";
if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
OS << " at ";
- printLocation(OS, SM, S->getLocStart());
+ printLocation(OS, SM, S->getBeginLoc());
}
break;
case Scope:
@@ -510,7 +503,7 @@ void LocationContext::dumpStack(
OS << "Invoking block";
if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
OS << " defined at ";
- printLocation(OS, SM, D->getLocStart());
+ printLocation(OS, SM, D->getBeginLoc());
}
break;
}
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index ac8fcdc912a0..35f046406763 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -201,10 +201,9 @@ ObjCIvarRefExpr *ASTMaker::makeObjCIvarRef(const Expr *Base,
/*arrow=*/true, /*free=*/false);
}
-
ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
- return new (C) ReturnStmt(SourceLocation(), const_cast<Expr*>(RetVal),
- nullptr);
+ return ReturnStmt::Create(C, SourceLocation(), const_cast<Expr *>(RetVal),
+ /* NRVOCandidate=*/nullptr);
}
IntegerLiteral *ASTMaker::makeIntegerLiteral(uint64_t Value, QualType Ty) {
@@ -270,8 +269,8 @@ static CallExpr *create_call_once_funcptr_call(ASTContext &C, ASTMaker M,
llvm_unreachable("Unexpected state");
}
- return new (C)
- CallExpr(C, SubExpr, CallArgs, C.VoidTy, VK_RValue, SourceLocation());
+ return CallExpr::Create(C, SubExpr, CallArgs, C.VoidTy, VK_RValue,
+ SourceLocation());
}
static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
@@ -293,12 +292,12 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/* T =*/ callOperatorDecl->getType(),
/* VK =*/ VK_LValue);
- return new (C)
- CXXOperatorCallExpr(/*AstContext=*/C, OO_Call, callOperatorDeclRef,
- /*args=*/CallArgs,
- /*QualType=*/C.VoidTy,
- /*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation(), FPOptions());
+ return CXXOperatorCallExpr::Create(
+ /*AstContext=*/C, OO_Call, callOperatorDeclRef,
+ /*args=*/CallArgs,
+ /*QualType=*/C.VoidTy,
+ /*ExprValueType=*/VK_RValue,
+ /*SourceLocation=*/SourceLocation(), FPOptions());
}
/// Create a fake body for std::call_once.
@@ -464,13 +463,13 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
Deref, M.makeIntegralCast(M.makeIntegerLiteral(1, C.IntTy), DerefType),
DerefType);
- IfStmt *Out = new (C)
- IfStmt(C, SourceLocation(),
- /* IsConstexpr=*/ false,
- /* init=*/ nullptr,
- /* var=*/ nullptr,
- /* cond=*/ FlagCheck,
- /* then=*/ M.makeCompound({CallbackCall, FlagAssignment}));
+ auto *Out =
+ IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr,
+ /* cond=*/FlagCheck,
+ /* then=*/M.makeCompound({CallbackCall, FlagAssignment}));
return Out;
}
@@ -510,7 +509,7 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
ASTMaker M(C);
// (1) Create the call.
- CallExpr *CE = new (C) CallExpr(
+ CallExpr *CE = CallExpr::Create(
/*ASTContext=*/C,
/*StmtClass=*/M.makeLvalueToRvalue(/*Expr=*/Block),
/*args=*/None,
@@ -549,12 +548,12 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
Expr *GuardCondition = M.makeComparison(LValToRval, DoneValue, BO_NE);
// (5) Create the 'if' statement.
- IfStmt *If = new (C) IfStmt(C, SourceLocation(),
- /* IsConstexpr=*/ false,
- /* init=*/ nullptr,
- /* var=*/ nullptr,
- /* cond=*/ GuardCondition,
- /* then=*/ CS);
+ auto *If = IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr,
+ /* cond=*/GuardCondition,
+ /* then=*/CS);
return If;
}
@@ -580,8 +579,8 @@ static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
ASTMaker M(C);
DeclRefExpr *DR = M.makeDeclRefExpr(PV);
ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
- CallExpr *CE = new (C) CallExpr(C, ICE, None, C.VoidTy, VK_RValue,
- SourceLocation());
+ CallExpr *CE =
+ CallExpr::Create(C, ICE, None, C.VoidTy, VK_RValue, SourceLocation());
return CE;
}
@@ -657,8 +656,11 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
Stmt *Else = M.makeReturn(RetVal);
/// Construct the If.
- Stmt *If = new (C) IfStmt(C, SourceLocation(), false, nullptr, nullptr,
- Comparison, Body, SourceLocation(), Else);
+ auto *If = IfStmt::Create(C, SourceLocation(),
+ /* IsConstexpr=*/false,
+ /* init=*/nullptr,
+ /* var=*/nullptr, Comparison, Body,
+ SourceLocation(), Else);
return If;
}
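The BodyFarm changes track the AST allocation rework elsewhere in this patch set: nodes with trailing objects (a CallExpr's argument array, an IfStmt's init/condition-variable slots, a ReturnStmt's NRVO candidate) are built through Create() factories that size the allocation up front instead of placement new on the ASTContext. A minimal sketch of the new idiom, mirroring the calls above (Callee is assumed to be an already-built expression):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Expr.h"

    using namespace clang;

    static CallExpr *makeVoidCall(ASTContext &C, Expr *Callee) {
      // One allocation holds the CallExpr node and its (empty) argument array.
      return CallExpr::Create(C, Callee, /*Args=*/llvm::None, C.VoidTy, VK_RValue,
                              SourceLocation());
    }
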
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 97829de7ace3..96130c25be8a 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -551,6 +551,7 @@ private:
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitConstantExpr(ConstantExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
CFGBlock *VisitBlockExpr(BlockExpr *E, AddStmtChoice asc);
@@ -571,7 +572,7 @@ private:
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
CFGBlock *VisitObjCMessageExpr(ObjCMessageExpr *E, AddStmtChoice asc);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
- CFGBlock *VisitReturnStmt(ReturnStmt *R);
+ CFGBlock *VisitReturnStmt(Stmt *S);
CFGBlock *VisitSEHExceptStmt(SEHExceptStmt *S);
CFGBlock *VisitSEHFinallyStmt(SEHFinallyStmt *S);
CFGBlock *VisitSEHLeaveStmt(SEHLeaveStmt *S);
@@ -1038,12 +1039,14 @@ private:
if (!areExprTypesCompatible(Expr1, Expr2))
return {};
- llvm::APSInt L1, L2;
-
- if (!Expr1->EvaluateAsInt(L1, *Context) ||
- !Expr2->EvaluateAsInt(L2, *Context))
+ Expr::EvalResult L1Result, L2Result;
+ if (!Expr1->EvaluateAsInt(L1Result, *Context) ||
+ !Expr2->EvaluateAsInt(L2Result, *Context))
return {};
+ llvm::APSInt L1 = L1Result.Val.getInt();
+ llvm::APSInt L2 = L2Result.Val.getInt();
+
// Can't compare signed with unsigned or with different bit width.
if (L1.isSigned() != L2.isSigned() || L1.getBitWidth() != L2.getBitWidth())
return {};
@@ -1133,13 +1136,16 @@ private:
case BO_And: {
// If either operand is zero, we know the value
// must be false.
- llvm::APSInt IntVal;
- if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) {
+ Expr::EvalResult LHSResult;
+ if (Bop->getLHS()->EvaluateAsInt(LHSResult, *Context)) {
+ llvm::APSInt IntVal = LHSResult.Val.getInt();
if (!IntVal.getBoolValue()) {
return TryResult(false);
}
}
- if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) {
+ Expr::EvalResult RHSResult;
+ if (Bop->getRHS()->EvaluateAsInt(RHSResult, *Context)) {
+ llvm::APSInt IntVal = RHSResult.Val.getInt();
if (!IntVal.getBoolValue()) {
return TryResult(false);
}
@@ -1334,6 +1340,7 @@ void CFGBuilder::findConstructionContexts(
case CK_NoOp:
case CK_ConstructorConversion:
findConstructionContexts(Layer, Cast->getSubExpr());
+ break;
default:
break;
}
@@ -2099,6 +2106,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::ImplicitCastExprClass:
return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
+ case Stmt::ConstantExprClass:
+ return VisitConstantExpr(cast<ConstantExpr>(S), asc);
+
case Stmt::IndirectGotoStmtClass:
return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
@@ -2146,7 +2156,8 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
case Stmt::ReturnStmtClass:
- return VisitReturnStmt(cast<ReturnStmt>(S));
+ case Stmt::CoreturnStmtClass:
+ return VisitReturnStmt(S);
case Stmt::SEHExceptStmtClass:
return VisitSEHExceptStmt(cast<SEHExceptStmt>(S));
@@ -2421,8 +2432,6 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!boundType.isNull()) calleeType = boundType;
}
- findConstructionContextsForArguments(C);
-
// If this is a call to a no-return function, this stops the block here.
bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();
@@ -2439,6 +2448,13 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
bool OmitArguments = false;
if (FunctionDecl *FD = C->getDirectCallee()) {
+ // TODO: Support construction contexts for variadic function arguments.
+ // These are a bit problematic and not very useful because passing
+ // C++ objects as C-style variadic arguments doesn't work in general
+ // (see [expr.call]).
+ if (!FD->isVariadic())
+ findConstructionContextsForArguments(C);
+
if (FD->isNoReturn() || C->isBuiltinAssumeFalse(*Context))
NoReturn = true;
if (FD->hasAttr<NoThrowAttr>())
@@ -2627,15 +2643,12 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
E = DS->decl_rend();
I != E; ++I) {
- // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
- unsigned A = alignof(DeclStmt) < 8 ? 8 : alignof(DeclStmt);
// Allocate the DeclStmt using the BumpPtrAllocator. It will get
// automatically freed with the CFG.
DeclGroupRef DG(*I);
Decl *D = *I;
- void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
- DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+ DeclStmt *DSNew = new (Context) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
cfg->addSyntheticDeclStmt(DSNew, DS);
// Append the fake DeclStmt to block.
@@ -2874,22 +2887,24 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
return LastBlock;
}
-CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
+CFGBlock *CFGBuilder::VisitReturnStmt(Stmt *S) {
// If we were in the middle of a block we stop processing that block.
//
- // NOTE: If a "return" appears in the middle of a block, this means that the
- // code afterwards is DEAD (unreachable). We still keep a basic block
- // for that code; a simple "mark-and-sweep" from the entry block will be
- // able to report such dead blocks.
+ // NOTE: If a "return" or "co_return" appears in the middle of a block, this
+ // means that the code afterwards is DEAD (unreachable). We still keep
+ // a basic block for that code; a simple "mark-and-sweep" from the entry
+ // block will be able to report such dead blocks.
+ assert(isa<ReturnStmt>(S) || isa<CoreturnStmt>(S));
// Create the new block.
Block = createBlock(false);
- addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), R);
+ addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), S);
- findConstructionContexts(
- ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
- R->getRetValue());
+ if (auto *R = dyn_cast<ReturnStmt>(S))
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
+ R->getRetValue());
// If the one of the destructors does not return, we already have the Exit
// block as a successor.
@@ -2898,7 +2913,7 @@ CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
// Add the return statement to the block. This may create new blocks if R
// contains control-flow (short-circuit operations).
- return VisitStmt(R, AddStmtChoice::AlwaysAdd);
+ return VisitStmt(S, AddStmtChoice::AlwaysAdd);
}
CFGBlock *CFGBuilder::VisitSEHExceptStmt(SEHExceptStmt *ES) {
@@ -4250,7 +4265,10 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
Block = createBlock();
addStmt(S->getBeginStmt());
addStmt(S->getEndStmt());
- return addStmt(S->getRangeStmt());
+ CFGBlock *Head = addStmt(S->getRangeStmt());
+ if (S->getInit())
+ Head = addStmt(S->getInit());
+ return Head;
}
CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
@@ -4352,6 +4370,11 @@ CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
AddStmtChoice asc) {
+ // If the constructor takes objects as arguments by value, we need to properly
+ // construct these objects. Construction contexts we find here aren't for the
+ // constructor C, they're for its arguments only.
+ findConstructionContextsForArguments(C);
+
autoCreateBlock();
appendConstructor(Block, C);
return VisitChildren(C);
@@ -4366,6 +4389,10 @@ CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
return Visit(E->getSubExpr(), AddStmtChoice());
}
+CFGBlock *CFGBuilder::VisitConstantExpr(ConstantExpr *E, AddStmtChoice asc) {
+ return Visit(E->getSubExpr(), AddStmtChoice());
+}
+
CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
// Lazily create the indirect-goto dispatch block if there isn't one already.
CFGBlock *IBlock = cfg->getIndirectGotoBlock();
@@ -4422,6 +4449,10 @@ tryAgain:
E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
goto tryAgain;
+ case Stmt::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ goto tryAgain;
+
case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
goto tryAgain;
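
Purely for illustration (not part of the patch): the CFG.cpp hunks above migrate callers of Expr::EvaluateAsInt to the form where the result arrives as an Expr::EvalResult whose APValue payload carries the integer. A minimal sketch of that calling convention follows; the helper name foldsToFalse and its parameters are made up here, only the EvaluateAsInt/EvalResult usage mirrors the hunks.

    static bool foldsToFalse(const Expr *E, const ASTContext &Ctx) {
      Expr::EvalResult Result;              // replaces the old bare llvm::APSInt out-parameter
      if (!E->EvaluateAsInt(Result, Ctx))
        return false;                       // not a compile-time constant integer
      llvm::APSInt Val = Result.Val.getInt();
      return !Val.getBoolValue();           // true when the operand folds to zero
    }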
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 432067d98157..5345a56f2002 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -15,16 +15,12 @@ add_clang_library(clangAnalysis
Consumed.cpp
CodeInjector.cpp
Dominators.cpp
- FormatString.cpp
+ ExprMutationAnalyzer.cpp
LiveVariables.cpp
- OSLog.cpp
ObjCNoReturn.cpp
PostOrderCFGView.cpp
- PrintfFormatString.cpp
ProgramPoint.cpp
- PseudoConstantAnalysis.cpp
ReachableCode.cpp
- ScanfFormatString.cpp
ThreadSafety.cpp
ThreadSafetyCommon.cpp
ThreadSafetyLogical.cpp
@@ -33,6 +29,7 @@ add_clang_library(clangAnalysis
LINK_LIBS
clangAST
+ clangASTMatchers
clangBasic
clangLex
)
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index bac00680ffda..66a6f1a9bcea 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -212,7 +212,7 @@ void CallGraph::viewGraph() const {
void CallGraphNode::print(raw_ostream &os) const {
if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(FD))
- return ND->printName(os);
+ return ND->printQualifiedName(os);
os << "< >";
}
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index 8912b3b76751..88402e2adaa7 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
///
-/// This file implements classes for searching and anlyzing source code clones.
+/// This file implements classes for searching and analyzing source code clones.
///
//===----------------------------------------------------------------------===//
@@ -45,8 +45,8 @@ bool StmtSequence::contains(const StmtSequence &Other) const {
// Otherwise check if the start and end locations of the current sequence
// surround the other sequence.
bool StartIsInBounds =
- SM.isBeforeInTranslationUnit(getStartLoc(), Other.getStartLoc()) ||
- getStartLoc() == Other.getStartLoc();
+ SM.isBeforeInTranslationUnit(getBeginLoc(), Other.getBeginLoc()) ||
+ getBeginLoc() == Other.getBeginLoc();
if (!StartIsInBounds)
return false;
@@ -77,14 +77,14 @@ ASTContext &StmtSequence::getASTContext() const {
return D->getASTContext();
}
-SourceLocation StmtSequence::getStartLoc() const {
- return front()->getLocStart();
+SourceLocation StmtSequence::getBeginLoc() const {
+ return front()->getBeginLoc();
}
-SourceLocation StmtSequence::getEndLoc() const { return back()->getLocEnd(); }
+SourceLocation StmtSequence::getEndLoc() const { return back()->getEndLoc(); }
SourceRange StmtSequence::getSourceRange() const {
- return SourceRange(getStartLoc(), getEndLoc());
+ return SourceRange(getBeginLoc(), getEndLoc());
}
void CloneDetector::analyzeCodeBody(const Decl *D) {
@@ -433,7 +433,7 @@ size_t MinComplexityConstraint::calculateStmtComplexity(
// Look up what macros expanded into the current statement.
std::string MacroStack =
- data_collection::getMacroStack(Seq.getStartLoc(), Context);
+ data_collection::getMacroStack(Seq.getBeginLoc(), Context);
// First, check if ParentMacroStack is not empty which means we are currently
// dealing with a parent statement which was expanded from a macro.
@@ -523,8 +523,7 @@ void CloneConstraint::splitCloneGroups(
Result.push_back(PotentialGroup);
}
- assert(std::all_of(Indexes.begin(), Indexes.end(),
- [](char c) { return c == 1; }));
+ assert(llvm::all_of(Indexes, [](char c) { return c == 1; }));
}
CloneGroups = Result;
}
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index bc81a71b3d91..16eeaba2f61b 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -64,7 +64,7 @@ static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
// is not empty.
for (const auto &B : *Block)
if (Optional<CFGStmt> CS = B.getAs<CFGStmt>())
- return CS->getStmt()->getLocStart();
+ return CS->getStmt()->getBeginLoc();
// Block is empty.
// If we have one successor, return the first statement in that block
@@ -78,12 +78,12 @@ static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
// Find the source location of the last statement in the block, if the block
// is not empty.
if (const Stmt *StmtNode = Block->getTerminator()) {
- return StmtNode->getLocStart();
+ return StmtNode->getBeginLoc();
} else {
for (CFGBlock::const_reverse_iterator BI = Block->rbegin(),
BE = Block->rend(); BI != BE; ++BI) {
if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>())
- return CS->getStmt()->getLocStart();
+ return CS->getStmt()->getBeginLoc();
}
}
@@ -463,7 +463,6 @@ class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
using InfoEntry = MapType::iterator;
using ConstInfoEntry = MapType::const_iterator;
- AnalysisDeclContext &AC;
ConsumedAnalyzer &Analyzer;
ConsumedStateMap *StateMap;
MapType PropagationMap;
@@ -515,9 +514,8 @@ public:
void VisitUnaryOperator(const UnaryOperator *UOp);
void VisitVarDecl(const VarDecl *Var);
- ConsumedStmtVisitor(AnalysisDeclContext &AC, ConsumedAnalyzer &Analyzer,
- ConsumedStateMap *StateMap)
- : AC(AC), Analyzer(Analyzer), StateMap(StateMap) {}
+ ConsumedStmtVisitor(ConsumedAnalyzer &Analyzer, ConsumedStateMap *StateMap)
+ : Analyzer(Analyzer), StateMap(StateMap) {}
PropagationInfo getInfo(const Expr *StmtNode) const {
ConstInfoEntry Entry = findInfo(StmtNode);
@@ -774,8 +772,7 @@ void ConsumedStmtVisitor::VisitCXXBindTemporaryExpr(
void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
CXXConstructorDecl *Constructor = Call->getConstructor();
- ASTContext &CurrContext = AC.getASTContext();
- QualType ThisType = Constructor->getThisType(CurrContext)->getPointeeType();
+ QualType ThisType = Constructor->getThisType()->getPointeeType();
if (!isConsumableType(ThisType))
return;
@@ -793,7 +790,7 @@ void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
} else if (Constructor->isCopyConstructor()) {
// Copy state from arg. If setStateOnRead then set arg to CS_Unknown.
ConsumedState NS =
- isSetOnReadPtrType(Constructor->getThisType(CurrContext)) ?
+ isSetOnReadPtrType(Constructor->getThisType()) ?
CS_Unknown : CS_None;
copyInfo(Call->getArg(0), Call, NS);
} else {
@@ -893,7 +890,7 @@ void ConsumedStmtVisitor::VisitReturnStmt(const ReturnStmt *Ret) {
}
}
- StateMap->checkParamsForReturnTypestate(Ret->getLocStart(),
+ StateMap->checkParamsForReturnTypestate(Ret->getBeginLoc(),
Analyzer.WarningsHandler);
}
@@ -1203,8 +1200,7 @@ void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D) {
QualType ReturnType;
if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
- ASTContext &CurrContext = AC.getASTContext();
- ReturnType = Constructor->getThisType(CurrContext)->getPointeeType();
+ ReturnType = Constructor->getThisType()->getPointeeType();
} else
ReturnType = D->getCallResultType();
@@ -1323,7 +1319,7 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
BlockInfo = ConsumedBlockInfo(CFGraph->getNumBlockIDs(), SortedGraph);
CurrStates = llvm::make_unique<ConsumedStateMap>();
- ConsumedStmtVisitor Visitor(AC, *this, CurrStates.get());
+ ConsumedStmtVisitor Visitor(*this, CurrStates.get());
// Add all trackable parameters to the state map.
for (const auto *PI : D->parameters())
@@ -1363,7 +1359,7 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
case CFGElement::AutomaticObjectDtor: {
const CFGAutomaticObjDtor &DTor = B.castAs<CFGAutomaticObjDtor>();
- SourceLocation Loc = DTor.getTriggerStmt()->getLocEnd();
+ SourceLocation Loc = DTor.getTriggerStmt()->getEndLoc();
const VarDecl *Var = DTor.getVarDecl();
Visitor.checkCallability(PropagationInfo(Var),
diff --git a/lib/Analysis/ExprMutationAnalyzer.cpp b/lib/Analysis/ExprMutationAnalyzer.cpp
new file mode 100644
index 000000000000..8414cb5c726a
--- /dev/null
+++ b/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -0,0 +1,445 @@
+//===---------- ExprMutationAnalyzer.cpp ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+using namespace ast_matchers;
+
+namespace {
+
+AST_MATCHER_P(LambdaExpr, hasCaptureInit, const Expr *, E) {
+ return llvm::is_contained(Node.capture_inits(), E);
+}
+
+AST_MATCHER_P(CXXForRangeStmt, hasRangeStmt,
+ ast_matchers::internal::Matcher<DeclStmt>, InnerMatcher) {
+ const DeclStmt *const Range = Node.getRangeStmt();
+ return InnerMatcher.matches(*Range, Finder, Builder);
+}
+
+const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXTypeidExpr>
+ cxxTypeidExpr;
+
+AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
+ return Node.isPotentiallyEvaluated();
+}
+
+const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
+ cxxNoexceptExpr;
+
+const ast_matchers::internal::VariadicDynCastAllOfMatcher<Stmt,
+ GenericSelectionExpr>
+ genericSelectionExpr;
+
+AST_MATCHER_P(GenericSelectionExpr, hasControllingExpr,
+ ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+ return InnerMatcher.matches(*Node.getControllingExpr(), Finder, Builder);
+}
+
+const auto nonConstReferenceType = [] {
+ return hasUnqualifiedDesugaredType(
+ referenceType(pointee(unless(isConstQualified()))));
+};
+
+const auto nonConstPointerType = [] {
+ return hasUnqualifiedDesugaredType(
+ pointerType(pointee(unless(isConstQualified()))));
+};
+
+const auto isMoveOnly = [] {
+ return cxxRecordDecl(
+ hasMethod(cxxConstructorDecl(isMoveConstructor(), unless(isDeleted()))),
+ hasMethod(cxxMethodDecl(isMoveAssignmentOperator(), unless(isDeleted()))),
+ unless(anyOf(hasMethod(cxxConstructorDecl(isCopyConstructor(),
+ unless(isDeleted()))),
+ hasMethod(cxxMethodDecl(isCopyAssignmentOperator(),
+ unless(isDeleted()))))));
+};
+
+template <class T> struct NodeID;
+template <> struct NodeID<Expr> { static const std::string value; };
+template <> struct NodeID<Decl> { static const std::string value; };
+const std::string NodeID<Expr>::value = "expr";
+const std::string NodeID<Decl>::value = "decl";
+
+template <class T, class F = const Stmt *(ExprMutationAnalyzer::*)(const T *)>
+const Stmt *tryEachMatch(ArrayRef<ast_matchers::BoundNodes> Matches,
+ ExprMutationAnalyzer *Analyzer, F Finder) {
+ const StringRef ID = NodeID<T>::value;
+ for (const auto &Nodes : Matches) {
+ if (const Stmt *S = (Analyzer->*Finder)(Nodes.getNodeAs<T>(ID)))
+ return S;
+ }
+ return nullptr;
+}
+
+} // namespace
+
+const Stmt *ExprMutationAnalyzer::findMutation(const Expr *Exp) {
+ return findMutationMemoized(Exp,
+ {&ExprMutationAnalyzer::findDirectMutation,
+ &ExprMutationAnalyzer::findMemberMutation,
+ &ExprMutationAnalyzer::findArrayElementMutation,
+ &ExprMutationAnalyzer::findCastMutation,
+ &ExprMutationAnalyzer::findRangeLoopMutation,
+ &ExprMutationAnalyzer::findReferenceMutation,
+ &ExprMutationAnalyzer::findFunctionArgMutation},
+ Results);
+}
+
+const Stmt *ExprMutationAnalyzer::findMutation(const Decl *Dec) {
+ return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findMutation);
+}
+
+const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Expr *Exp) {
+ return findMutationMemoized(Exp, {/*TODO*/}, PointeeResults);
+}
+
+const Stmt *ExprMutationAnalyzer::findPointeeMutation(const Decl *Dec) {
+ return tryEachDeclRef(Dec, &ExprMutationAnalyzer::findPointeeMutation);
+}
+
+const Stmt *ExprMutationAnalyzer::findMutationMemoized(
+ const Expr *Exp, llvm::ArrayRef<MutationFinder> Finders,
+ ResultMap &MemoizedResults) {
+ const auto Memoized = MemoizedResults.find(Exp);
+ if (Memoized != MemoizedResults.end())
+ return Memoized->second;
+
+ if (isUnevaluated(Exp))
+ return MemoizedResults[Exp] = nullptr;
+
+ for (const auto &Finder : Finders) {
+ if (const Stmt *S = (this->*Finder)(Exp))
+ return MemoizedResults[Exp] = S;
+ }
+
+ return MemoizedResults[Exp] = nullptr;
+}
+
+const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
+ MutationFinder Finder) {
+ const auto Refs =
+ match(findAll(declRefExpr(to(equalsNode(Dec))).bind(NodeID<Expr>::value)),
+ Stm, Context);
+ for (const auto &RefNodes : Refs) {
+ const auto *E = RefNodes.getNodeAs<Expr>(NodeID<Expr>::value);
+ if ((this->*Finder)(E))
+ return E;
+ }
+ return nullptr;
+}
+
+bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
+ return selectFirst<Expr>(
+ NodeID<Expr>::value,
+ match(
+ findAll(
+ expr(equalsNode(Exp),
+ anyOf(
+ // `Exp` is part of the underlying expression of
+ // decltype/typeof if it has an ancestor of
+ // typeLoc.
+ hasAncestor(typeLoc(unless(
+ hasAncestor(unaryExprOrTypeTraitExpr())))),
+ hasAncestor(expr(anyOf(
+ // `UnaryExprOrTypeTraitExpr` is unevaluated
+ // unless it's sizeof on VLA.
+ unaryExprOrTypeTraitExpr(unless(sizeOfExpr(
+ hasArgumentOfType(variableArrayType())))),
+ // `CXXTypeidExpr` is unevaluated unless it's
+ // applied to an expression of glvalue of
+ // polymorphic class type.
+ cxxTypeidExpr(
+ unless(isPotentiallyEvaluated())),
+ // The controlling expression of
+ // `GenericSelectionExpr` is unevaluated.
+ genericSelectionExpr(hasControllingExpr(
+ hasDescendant(equalsNode(Exp)))),
+ cxxNoexceptExpr())))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context)) != nullptr;
+}
+
+const Stmt *
+ExprMutationAnalyzer::findExprMutation(ArrayRef<BoundNodes> Matches) {
+ return tryEachMatch<Expr>(Matches, this, &ExprMutationAnalyzer::findMutation);
+}
+
+const Stmt *
+ExprMutationAnalyzer::findDeclMutation(ArrayRef<BoundNodes> Matches) {
+ return tryEachMatch<Decl>(Matches, this, &ExprMutationAnalyzer::findMutation);
+}
+
+const Stmt *ExprMutationAnalyzer::findExprPointeeMutation(
+ ArrayRef<ast_matchers::BoundNodes> Matches) {
+ return tryEachMatch<Expr>(Matches, this,
+ &ExprMutationAnalyzer::findPointeeMutation);
+}
+
+const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
+ ArrayRef<ast_matchers::BoundNodes> Matches) {
+ return tryEachMatch<Decl>(Matches, this,
+ &ExprMutationAnalyzer::findPointeeMutation);
+}
+
+const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
+ // LHS of any assignment operators.
+ const auto AsAssignmentLhs =
+ binaryOperator(isAssignmentOperator(), hasLHS(equalsNode(Exp)));
+
+ // Operand of increment/decrement operators.
+ const auto AsIncDecOperand =
+ unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
+ hasUnaryOperand(equalsNode(Exp)));
+
+ // Invoking non-const member function.
+ // A member function is assumed to be non-const when it is unresolved.
+ const auto NonConstMethod = cxxMethodDecl(unless(isConst()));
+ const auto AsNonConstThis =
+ expr(anyOf(cxxMemberCallExpr(callee(NonConstMethod), on(equalsNode(Exp))),
+ cxxOperatorCallExpr(callee(NonConstMethod),
+ hasArgument(0, equalsNode(Exp))),
+ callExpr(callee(expr(anyOf(
+ unresolvedMemberExpr(hasObjectExpression(equalsNode(Exp))),
+ cxxDependentScopeMemberExpr(
+ hasObjectExpression(equalsNode(Exp)))))))));
+
+ // Taking address of 'Exp'.
+ // We're assuming 'Exp' is mutated as soon as its address is taken, though in
+ // theory we can follow the pointer and see whether it escaped `Stm` or is
+ // dereferenced and then mutated. This is left for future improvements.
+ const auto AsAmpersandOperand =
+ unaryOperator(hasOperatorName("&"),
+ // A NoOp implicit cast is adding const.
+ unless(hasParent(implicitCastExpr(hasCastKind(CK_NoOp)))),
+ hasUnaryOperand(equalsNode(Exp)));
+ const auto AsPointerFromArrayDecay =
+ castExpr(hasCastKind(CK_ArrayToPointerDecay),
+ unless(hasParent(arraySubscriptExpr())), has(equalsNode(Exp)));
+  // Treat calling `operator->()` of move-only classes as taking the address.
+  // These are typically smart pointers with unique ownership, so we treat
+  // mutation of the pointee as mutation of the smart pointer itself.
+ const auto AsOperatorArrowThis =
+ cxxOperatorCallExpr(hasOverloadedOperatorName("->"),
+ callee(cxxMethodDecl(ofClass(isMoveOnly()),
+ returns(nonConstPointerType()))),
+ argumentCountIs(1), hasArgument(0, equalsNode(Exp)));
+
+ // Used as non-const-ref argument when calling a function.
+ // An argument is assumed to be non-const-ref when the function is unresolved.
+ // Instantiated template functions are not handled here but in
+ // findFunctionArgMutation which has additional smarts for handling forwarding
+ // references.
+ const auto NonConstRefParam = forEachArgumentWithParam(
+ equalsNode(Exp), parmVarDecl(hasType(nonConstReferenceType())));
+ const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
+ const auto AsNonConstRefArg = anyOf(
+ callExpr(NonConstRefParam, NotInstantiated),
+ cxxConstructExpr(NonConstRefParam, NotInstantiated),
+ callExpr(callee(expr(anyOf(unresolvedLookupExpr(), unresolvedMemberExpr(),
+ cxxDependentScopeMemberExpr(),
+ hasType(templateTypeParmType())))),
+ hasAnyArgument(equalsNode(Exp))),
+ cxxUnresolvedConstructExpr(hasAnyArgument(equalsNode(Exp))));
+
+ // Captured by a lambda by reference.
+ // If we're initializing a capture with 'Exp' directly then we're initializing
+ // a reference capture.
+ // For value captures there will be an ImplicitCastExpr <LValueToRValue>.
+ const auto AsLambdaRefCaptureInit = lambdaExpr(hasCaptureInit(Exp));
+
+ // Returned as non-const-ref.
+ // If we're returning 'Exp' directly then it's returned as non-const-ref.
+ // For returning by value there will be an ImplicitCastExpr <LValueToRValue>.
+ // For returning by const-ref there will be an ImplicitCastExpr <NoOp> (for
+ // adding const.)
+ const auto AsNonConstRefReturn = returnStmt(hasReturnValue(equalsNode(Exp)));
+
+ const auto Matches =
+ match(findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
+ AsAmpersandOperand, AsPointerFromArrayDecay,
+ AsOperatorArrowThis, AsNonConstRefArg,
+ AsLambdaRefCaptureInit, AsNonConstRefReturn))
+ .bind("stmt")),
+ Stm, Context);
+ return selectFirst<Stmt>("stmt", Matches);
+}
+
+const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
+ // Check whether any member of 'Exp' is mutated.
+ const auto MemberExprs =
+ match(findAll(expr(anyOf(memberExpr(hasObjectExpression(equalsNode(Exp))),
+ cxxDependentScopeMemberExpr(
+ hasObjectExpression(equalsNode(Exp)))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+ return findExprMutation(MemberExprs);
+}
+
+const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
+ // Check whether any element of an array is mutated.
+ const auto SubscriptExprs = match(
+ findAll(arraySubscriptExpr(hasBase(ignoringImpCasts(equalsNode(Exp))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+ return findExprMutation(SubscriptExprs);
+}
+
+const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
+  // If 'Exp' is cast to any non-const reference type, check the castExpr.
+ const auto Casts =
+ match(findAll(castExpr(hasSourceExpression(equalsNode(Exp)),
+ anyOf(explicitCastExpr(hasDestinationType(
+ nonConstReferenceType())),
+ implicitCastExpr(hasImplicitDestinationType(
+ nonConstReferenceType()))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+ if (const Stmt *S = findExprMutation(Casts))
+ return S;
+ // Treat std::{move,forward} as cast.
+ const auto Calls =
+ match(findAll(callExpr(callee(namedDecl(
+ hasAnyName("::std::move", "::std::forward"))),
+ hasArgument(0, equalsNode(Exp)))
+ .bind("expr")),
+ Stm, Context);
+ return findExprMutation(Calls);
+}
+
+const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
+  // If a range-based for loop iterates over 'Exp' with a non-const reference
+  // loop variable, check all declRefExpr of the loop variable.
+ const auto LoopVars =
+ match(findAll(cxxForRangeStmt(
+ hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
+ .bind(NodeID<Decl>::value)),
+ hasRangeInit(equalsNode(Exp)))),
+ Stm, Context);
+ return findDeclMutation(LoopVars);
+}
+
+const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
+  // Follow a non-const reference returned by `operator*()` of move-only
+  // classes. These are typically smart pointers with unique ownership, so we
+  // treat mutation of the pointee as mutation of the smart pointer itself.
+ const auto Ref =
+ match(findAll(cxxOperatorCallExpr(
+ hasOverloadedOperatorName("*"),
+ callee(cxxMethodDecl(ofClass(isMoveOnly()),
+ returns(nonConstReferenceType()))),
+ argumentCountIs(1), hasArgument(0, equalsNode(Exp)))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+ if (const Stmt *S = findExprMutation(Ref))
+ return S;
+
+ // If 'Exp' is bound to a non-const reference, check all declRefExpr to that.
+ const auto Refs = match(
+ stmt(forEachDescendant(
+ varDecl(
+ hasType(nonConstReferenceType()),
+ hasInitializer(anyOf(equalsNode(Exp),
+ conditionalOperator(anyOf(
+ hasTrueExpression(equalsNode(Exp)),
+ hasFalseExpression(equalsNode(Exp)))))),
+ hasParent(declStmt().bind("stmt")),
+                 // Don't follow the reference in a range statement; we've
+                 // handled that separately.
+ unless(hasParent(declStmt(hasParent(
+ cxxForRangeStmt(hasRangeStmt(equalsBoundNode("stmt"))))))))
+ .bind(NodeID<Decl>::value))),
+ Stm, Context);
+ return findDeclMutation(Refs);
+}
+
+const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
+ const auto NonConstRefParam = forEachArgumentWithParam(
+ equalsNode(Exp),
+ parmVarDecl(hasType(nonConstReferenceType())).bind("parm"));
+ const auto IsInstantiated = hasDeclaration(isInstantiated());
+ const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));
+ const auto Matches = match(
+ findAll(expr(anyOf(callExpr(NonConstRefParam, IsInstantiated, FuncDecl,
+ unless(callee(namedDecl(hasAnyName(
+ "::std::move", "::std::forward"))))),
+ cxxConstructExpr(NonConstRefParam, IsInstantiated,
+ FuncDecl)))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
+ for (const auto &Nodes : Matches) {
+ const auto *Exp = Nodes.getNodeAs<Expr>(NodeID<Expr>::value);
+ const auto *Func = Nodes.getNodeAs<FunctionDecl>("func");
+ if (!Func->getBody() || !Func->getPrimaryTemplate())
+ return Exp;
+
+ const auto *Parm = Nodes.getNodeAs<ParmVarDecl>("parm");
+ const ArrayRef<ParmVarDecl *> AllParams =
+ Func->getPrimaryTemplate()->getTemplatedDecl()->parameters();
+ QualType ParmType =
+ AllParams[std::min<size_t>(Parm->getFunctionScopeIndex(),
+ AllParams.size() - 1)]
+ ->getType();
+ if (const auto *T = ParmType->getAs<PackExpansionType>())
+ ParmType = T->getPattern();
+
+ // If param type is forwarding reference, follow into the function
+ // definition and see whether the param is mutated inside.
+ if (const auto *RefType = ParmType->getAs<RValueReferenceType>()) {
+ if (!RefType->getPointeeType().getQualifiers() &&
+ RefType->getPointeeType()->getAs<TemplateTypeParmType>()) {
+ std::unique_ptr<FunctionParmMutationAnalyzer> &Analyzer =
+ FuncParmAnalyzer[Func];
+ if (!Analyzer)
+ Analyzer.reset(new FunctionParmMutationAnalyzer(*Func, Context));
+ if (Analyzer->findMutation(Parm))
+ return Exp;
+ continue;
+ }
+ }
+ // Not forwarding reference.
+ return Exp;
+ }
+ return nullptr;
+}
+
+FunctionParmMutationAnalyzer::FunctionParmMutationAnalyzer(
+ const FunctionDecl &Func, ASTContext &Context)
+ : BodyAnalyzer(*Func.getBody(), Context) {
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(&Func)) {
+ // CXXCtorInitializer might also mutate Param but they're not part of
+ // function body, check them eagerly here since they're typically trivial.
+ for (const CXXCtorInitializer *Init : Ctor->inits()) {
+ ExprMutationAnalyzer InitAnalyzer(*Init->getInit(), Context);
+ for (const ParmVarDecl *Parm : Ctor->parameters()) {
+ if (Results.find(Parm) != Results.end())
+ continue;
+ if (const Stmt *S = InitAnalyzer.findMutation(Parm))
+ Results[Parm] = S;
+ }
+ }
+ }
+}
+
+const Stmt *
+FunctionParmMutationAnalyzer::findMutation(const ParmVarDecl *Parm) {
+ const auto Memoized = Results.find(Parm);
+ if (Memoized != Results.end())
+ return Memoized->second;
+
+ if (const Stmt *S = BodyAnalyzer.findMutation(Parm))
+ return Results[Parm] = S;
+
+ return Results[Parm] = nullptr;
+}
+
+} // namespace clang
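
A hypothetical client of the analysis added above, shown only to illustrate the API surfaced in this file (the ExprMutationAnalyzer constructor and findMutation); the helper name firstMutation and its parameters are assumptions, and the sketch presumes clang/Analysis/Analyses/ExprMutationAnalyzer.h is included and FD has a body.

    // Returns the first statement that mutates VD inside FD, or null if VD is
    // never mutated there.
    static const Stmt *firstMutation(const FunctionDecl *FD, const VarDecl *VD) {
      ExprMutationAnalyzer Analyzer(*FD->getBody(), FD->getASTContext());
      return Analyzer.findMutation(VD);
    }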
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 05bc1a5d102c..afe2d264907f 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -93,6 +93,7 @@ public:
LiveVariables::Observer *obs = nullptr);
void dumpBlockLiveness(const SourceManager& M);
+ void dumpStmtLiveness(const SourceManager& M);
LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
: analysisContext(ac),
@@ -237,8 +238,8 @@ static const Stmt *LookThroughStmt(const Stmt *S) {
while (S) {
if (const Expr *Ex = dyn_cast<Expr>(S))
S = Ex->IgnoreParens();
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
- S = EWC->getSubExpr();
+ if (const FullExpr *FE = dyn_cast<FullExpr>(S)) {
+ S = FE->getSubExpr();
continue;
}
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
@@ -327,6 +328,35 @@ void TransferFunctions::Visit(Stmt *S) {
// No need to unconditionally visit subexpressions.
return;
}
+ case Stmt::IfStmtClass: {
+ // If one of the branches is an expression rather than a compound
+ // statement, it will be bad if we mark it as live at the terminator
+ // of the if-statement (i.e., immediately after the condition expression).
+ AddLiveStmt(val.liveStmts, LV.SSetFact, cast<IfStmt>(S)->getCond());
+ return;
+ }
+ case Stmt::WhileStmtClass: {
+ // If the loop body is an expression rather than a compound statement,
+ // it will be bad if we mark it as live at the terminator of the loop
+ // (i.e., immediately after the condition expression).
+ AddLiveStmt(val.liveStmts, LV.SSetFact, cast<WhileStmt>(S)->getCond());
+ return;
+ }
+ case Stmt::DoStmtClass: {
+ // If the loop body is an expression rather than a compound statement,
+ // it will be bad if we mark it as live at the terminator of the loop
+ // (i.e., immediately after the condition expression).
+ AddLiveStmt(val.liveStmts, LV.SSetFact, cast<DoStmt>(S)->getCond());
+ return;
+ }
+ case Stmt::ForStmtClass: {
+ // If the loop body is an expression rather than a compound statement,
+ // it will be bad if we mark it as live at the terminator of the loop
+ // (i.e., immediately after the condition expression).
+ AddLiveStmt(val.liveStmts, LV.SSetFact, cast<ForStmt>(S)->getCond());
+ return;
+ }
+
}
for (Stmt *Child : S->children()) {
@@ -597,7 +627,7 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
it != ei; ++it) {
vec.push_back(it->first);
}
- llvm::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
+ llvm::sort(vec, [](const CFGBlock *A, const CFGBlock *B) {
return A->getBlockID() < B->getBlockID();
});
@@ -617,21 +647,38 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
declVec.push_back(*si);
}
- llvm::sort(declVec.begin(), declVec.end(),
- [](const Decl *A, const Decl *B) {
- return A->getLocStart() < B->getLocStart();
+ llvm::sort(declVec, [](const Decl *A, const Decl *B) {
+ return A->getBeginLoc() < B->getBeginLoc();
});
for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
de = declVec.end(); di != de; ++di) {
llvm::errs() << " " << (*di)->getDeclName().getAsString()
<< " <";
- (*di)->getLocation().dump(M);
+ (*di)->getLocation().print(llvm::errs(), M);
llvm::errs() << ">\n";
}
}
llvm::errs() << "\n";
}
+void LiveVariables::dumpStmtLiveness(const SourceManager &M) {
+ getImpl(impl).dumpStmtLiveness(M);
+}
+
+void LiveVariablesImpl::dumpStmtLiveness(const SourceManager &M) {
+  // Don't iterate over blocksEndToLiveness directly because it's not sorted.
+ for (auto I : *analysisContext.getCFG()) {
+
+ llvm::errs() << "\n[ B" << I->getBlockID()
+ << " (live statements at block exit) ]\n";
+ for (auto S : blocksEndToLiveness[I].liveStmts) {
+ llvm::errs() << "\n";
+ S->dump();
+ }
+ llvm::errs() << "\n";
+ }
+}
+
const void *LiveVariables::getTag() { static int x; return &x; }
const void *RelaxedLiveVariables::getTag() { static int x; return &x; }
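
A hedged sketch of how the new dumpStmtLiveness entry point might be driven, assuming an already-initialized AnalysisDeclContext named AC for the function under analysis (AC itself is not defined in this patch).

    if (LiveVariables *LV = AC.getAnalysis<LiveVariables>())
      LV->dumpStmtLiveness(AC.getASTContext().getSourceManager());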
diff --git a/lib/Analysis/ProgramPoint.cpp b/lib/Analysis/ProgramPoint.cpp
index d9833659d7b3..2d016cb13353 100644
--- a/lib/Analysis/ProgramPoint.cpp
+++ b/lib/Analysis/ProgramPoint.cpp
@@ -43,6 +43,181 @@ ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
}
}
+LLVM_DUMP_METHOD void ProgramPoint::dump() const {
+ return print(/*CR=*/"\n", llvm::errs());
+}
+
+static void printLocation(raw_ostream &Out, SourceLocation SLoc,
+ const SourceManager &SM,
+ StringRef CR,
+ StringRef Postfix) {
+ if (SLoc.isFileID()) {
+ Out << CR << "line=" << SM.getExpansionLineNumber(SLoc)
+ << " col=" << SM.getExpansionColumnNumber(SLoc) << Postfix;
+ }
+}
+
+void ProgramPoint::print(StringRef CR, llvm::raw_ostream &Out) const {
+ const ASTContext &Context =
+ getLocationContext()->getAnalysisDeclContext()->getASTContext();
+ const SourceManager &SM = Context.getSourceManager();
+ switch (getKind()) {
+ case ProgramPoint::BlockEntranceKind:
+ Out << "Block Entrance: B"
+ << castAs<BlockEntrance>().getBlock()->getBlockID();
+ break;
+
+ case ProgramPoint::FunctionExitKind: {
+ auto FEP = getAs<FunctionExitPoint>();
+ Out << "Function Exit: B" << FEP->getBlock()->getBlockID();
+ if (const ReturnStmt *RS = FEP->getStmt()) {
+ Out << CR << " Return: S" << RS->getID(Context) << CR;
+ RS->printPretty(Out, /*helper=*/nullptr, Context.getPrintingPolicy(),
+ /*Indentation=*/2, /*NewlineSymbol=*/CR);
+ }
+ break;
+ }
+ case ProgramPoint::BlockExitKind:
+ assert(false);
+ break;
+
+ case ProgramPoint::CallEnterKind:
+ Out << "CallEnter";
+ break;
+
+ case ProgramPoint::CallExitBeginKind:
+ Out << "CallExitBegin";
+ break;
+
+ case ProgramPoint::CallExitEndKind:
+ Out << "CallExitEnd";
+ break;
+
+ case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+ Out << "PostStmtPurgeDeadSymbols";
+ break;
+
+ case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+ Out << "PreStmtPurgeDeadSymbols";
+ break;
+
+ case ProgramPoint::EpsilonKind:
+ Out << "Epsilon Point";
+ break;
+
+ case ProgramPoint::LoopExitKind: {
+ LoopExit LE = castAs<LoopExit>();
+ Out << "LoopExit: " << LE.getLoopStmt()->getStmtClassName();
+ break;
+ }
+
+ case ProgramPoint::PreImplicitCallKind: {
+ ImplicitCallPoint PC = castAs<ImplicitCallPoint>();
+ Out << "PreCall: ";
+ PC.getDecl()->print(Out, Context.getLangOpts());
+ printLocation(Out, PC.getLocation(), SM, CR, /*Postfix=*/CR);
+ break;
+ }
+
+ case ProgramPoint::PostImplicitCallKind: {
+ ImplicitCallPoint PC = castAs<ImplicitCallPoint>();
+ Out << "PostCall: ";
+ PC.getDecl()->print(Out, Context.getLangOpts());
+ printLocation(Out, PC.getLocation(), SM, CR, /*Postfix=*/CR);
+ break;
+ }
+
+ case ProgramPoint::PostInitializerKind: {
+ Out << "PostInitializer: ";
+ const CXXCtorInitializer *Init = castAs<PostInitializer>().getInitializer();
+ if (const FieldDecl *FD = Init->getAnyMember())
+ Out << *FD;
+ else {
+ QualType Ty = Init->getTypeSourceInfo()->getType();
+ Ty = Ty.getLocalUnqualifiedType();
+ Ty.print(Out, Context.getLangOpts());
+ }
+ break;
+ }
+
+ case ProgramPoint::BlockEdgeKind: {
+ const BlockEdge &E = castAs<BlockEdge>();
+ Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+ << E.getDst()->getBlockID() << ')';
+
+ if (const Stmt *T = E.getSrc()->getTerminator()) {
+ SourceLocation SLoc = T->getBeginLoc();
+
+ Out << "\\|Terminator: ";
+ E.getSrc()->printTerminator(Out, Context.getLangOpts());
+ printLocation(Out, SLoc, SM, CR, /*Postfix=*/"");
+
+ if (isa<SwitchStmt>(T)) {
+ const Stmt *Label = E.getDst()->getLabel();
+
+ if (Label) {
+ if (const auto *C = dyn_cast<CaseStmt>(Label)) {
+ Out << CR << "case ";
+ if (C->getLHS())
+ C->getLHS()->printPretty(
+ Out, nullptr, Context.getPrintingPolicy(),
+ /*Indentation=*/0, /*NewlineSymbol=*/CR);
+
+ if (const Stmt *RHS = C->getRHS()) {
+ Out << " .. ";
+ RHS->printPretty(Out, nullptr, Context.getPrintingPolicy(),
+                                 /*Indentation=*/0, /*NewlineSymbol=*/CR);
+ }
+
+ Out << ":";
+ } else {
+ assert(isa<DefaultStmt>(Label));
+ Out << CR << "default:";
+ }
+ } else
+ Out << CR << "(implicit) default:";
+ } else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME
+ } else {
+ Out << CR << "Condition: ";
+ if (*E.getSrc()->succ_begin() == E.getDst())
+ Out << "true";
+ else
+ Out << "false";
+ }
+
+ Out << CR;
+ }
+
+ break;
+ }
+
+ default: {
+ const Stmt *S = castAs<StmtPoint>().getStmt();
+ assert(S != nullptr && "Expecting non-null Stmt");
+
+ Out << S->getStmtClassName() << " S" << S->getID(Context) << " <"
+ << (const void *)S << "> ";
+ S->printPretty(Out, /*helper=*/nullptr, Context.getPrintingPolicy(),
+ /*Indentation=*/2, /*NewlineSymbol=*/CR);
+ printLocation(Out, S->getBeginLoc(), SM, CR, /*Postfix=*/"");
+
+ if (getAs<PreStmt>())
+ Out << CR << "PreStmt" << CR;
+ else if (getAs<PostLoad>())
+ Out << CR << "PostLoad" << CR;
+ else if (getAs<PostStore>())
+ Out << CR << "PostStore" << CR;
+ else if (getAs<PostLValue>())
+ Out << CR << "PostLValue" << CR;
+ else if (getAs<PostAllocatorCall>())
+ Out << CR << "PostAllocatorCall" << CR;
+
+ break;
+ }
+ }
+}
+
SimpleProgramPointTag::SimpleProgramPointTag(StringRef MsgProvider,
StringRef Msg)
: Desc((MsgProvider + " : " + Msg).str()) {}
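
For illustration only: two ways the printing hooks added above could be invoked, assuming PP is a ProgramPoint obtained elsewhere (for example from an exploded-graph node) and Out is an llvm::raw_ostream; neither call site is part of this patch.

    PP.dump();                    // multi-line form with "\n" separators, printed to llvm::errs()
    PP.print(/*CR=*/"\\l", Out);  // caller-chosen separator, e.g. for GraphViz-style dumps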
diff --git a/lib/Analysis/PseudoConstantAnalysis.cpp b/lib/Analysis/PseudoConstantAnalysis.cpp
deleted file mode 100644
index 83b545a7be83..000000000000
--- a/lib/Analysis/PseudoConstantAnalysis.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-//== PseudoConstantAnalysis.cpp - Find Pseudoconstants in the AST-*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file tracks the usage of variables in a Decl body to see if they are
-// never written to, implying that they constant. This is useful in static
-// analysis to see if a developer might have intended a variable to be const.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/Expr.h"
-#include "clang/AST/Stmt.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include <deque>
-
-using namespace clang;
-
-typedef llvm::SmallPtrSet<const VarDecl*, 32> VarDeclSet;
-
-PseudoConstantAnalysis::PseudoConstantAnalysis(const Stmt *DeclBody) :
- DeclBody(DeclBody), Analyzed(false) {
- NonConstantsImpl = new VarDeclSet;
- UsedVarsImpl = new VarDeclSet;
-}
-
-PseudoConstantAnalysis::~PseudoConstantAnalysis() {
- delete (VarDeclSet*)NonConstantsImpl;
- delete (VarDeclSet*)UsedVarsImpl;
-}
-
-// Returns true if the given ValueDecl is never written to in the given DeclBody
-bool PseudoConstantAnalysis::isPseudoConstant(const VarDecl *VD) {
- // Only local and static variables can be pseudoconstants
- if (!VD->hasLocalStorage() && !VD->isStaticLocal())
- return false;
-
- if (!Analyzed) {
- RunAnalysis();
- Analyzed = true;
- }
-
- VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
-
- return !NonConstants->count(VD);
-}
-
-// Returns true if the variable was used (self assignments don't count)
-bool PseudoConstantAnalysis::wasReferenced(const VarDecl *VD) {
- if (!Analyzed) {
- RunAnalysis();
- Analyzed = true;
- }
-
- VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
-
- return UsedVars->count(VD);
-}
-
-// Returns a Decl from a (Block)DeclRefExpr (if any)
-const Decl *PseudoConstantAnalysis::getDecl(const Expr *E) {
- if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
- return DR->getDecl();
- else
- return nullptr;
-}
-
-void PseudoConstantAnalysis::RunAnalysis() {
- std::deque<const Stmt *> WorkList;
- VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
- VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
-
- // Start with the top level statement of the function
- WorkList.push_back(DeclBody);
-
- while (!WorkList.empty()) {
- const Stmt *Head = WorkList.front();
- WorkList.pop_front();
-
- if (const Expr *Ex = dyn_cast<Expr>(Head))
- Head = Ex->IgnoreParenCasts();
-
- switch (Head->getStmtClass()) {
- // Case 1: Assignment operators modifying VarDecls
- case Stmt::BinaryOperatorClass: {
- const BinaryOperator *BO = cast<BinaryOperator>(Head);
- // Look for a Decl on the LHS
- const Decl *LHSDecl = getDecl(BO->getLHS()->IgnoreParenCasts());
- if (!LHSDecl)
- break;
-
- // We found a binary operator with a DeclRefExpr on the LHS. We now check
- // for any of the assignment operators, implying that this Decl is being
- // written to.
- switch (BO->getOpcode()) {
- // Self-assignments don't count as use of a variable
- case BO_Assign: {
- // Look for a DeclRef on the RHS
- const Decl *RHSDecl = getDecl(BO->getRHS()->IgnoreParenCasts());
-
- // If the Decls match, we have self-assignment
- if (LHSDecl == RHSDecl)
- // Do not visit the children
- continue;
-
- LLVM_FALLTHROUGH;
- }
- case BO_AddAssign:
- case BO_SubAssign:
- case BO_MulAssign:
- case BO_DivAssign:
- case BO_AndAssign:
- case BO_OrAssign:
- case BO_XorAssign:
- case BO_ShlAssign:
- case BO_ShrAssign: {
- const VarDecl *VD = dyn_cast<VarDecl>(LHSDecl);
- // The DeclRefExpr is being assigned to - mark it as non-constant
- if (VD)
- NonConstants->insert(VD);
- break;
- }
-
- default:
- break;
- }
- break;
- }
-
- // Case 2: Pre/post increment/decrement and address of
- case Stmt::UnaryOperatorClass: {
- const UnaryOperator *UO = cast<UnaryOperator>(Head);
-
- // Look for a DeclRef in the subexpression
- const Decl *D = getDecl(UO->getSubExpr()->IgnoreParenCasts());
- if (!D)
- break;
-
- // We found a unary operator with a DeclRef as a subexpression. We now
- // check for any of the increment/decrement operators, as well as
- // addressOf.
- switch (UO->getOpcode()) {
- case UO_PostDec:
- case UO_PostInc:
- case UO_PreDec:
- case UO_PreInc:
- // The DeclRef is being changed - mark it as non-constant
- case UO_AddrOf: {
- // If we are taking the address of the DeclRefExpr, assume it is
- // non-constant.
- const VarDecl *VD = dyn_cast<VarDecl>(D);
- if (VD)
- NonConstants->insert(VD);
- break;
- }
-
- default:
- break;
- }
- break;
- }
-
- // Case 3: Reference Declarations
- case Stmt::DeclStmtClass: {
- const DeclStmt *DS = cast<DeclStmt>(Head);
- // Iterate over each decl and see if any of them contain reference decls
- for (const auto *I : DS->decls()) {
- // We only care about VarDecls
- const VarDecl *VD = dyn_cast<VarDecl>(I);
- if (!VD)
- continue;
-
- // We found a VarDecl; make sure it is a reference type
- if (!VD->getType().getTypePtr()->isReferenceType())
- continue;
-
- // Try to find a Decl in the initializer
- const Decl *D = getDecl(VD->getInit()->IgnoreParenCasts());
- if (!D)
- break;
-
- // If the reference is to another var, add the var to the non-constant
- // list
- if (const VarDecl *RefVD = dyn_cast<VarDecl>(D)) {
- NonConstants->insert(RefVD);
- continue;
- }
- }
- break;
- }
-
- // Case 4: Variable references
- case Stmt::DeclRefExprClass: {
- const DeclRefExpr *DR = cast<DeclRefExpr>(Head);
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- // Add the Decl to the used list
- UsedVars->insert(VD);
- continue;
- }
- break;
- }
-
- // Case 5: Block expressions
- case Stmt::BlockExprClass: {
- const BlockExpr *B = cast<BlockExpr>(Head);
- // Add the body of the block to the list
- WorkList.push_back(B->getBody());
- continue;
- }
-
- default:
- break;
- } // switch (head->getStmtClass())
-
- // Add all substatements to the worklist
- for (const Stmt *SubStmt : Head->children())
- if (SubStmt)
- WorkList.push_back(SubStmt);
- } // while (!WorkList.empty())
-}
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index ed26a94f3d60..87f4f7010f98 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -153,7 +153,7 @@ static bool isExpandedFromConfigurationMacro(const Stmt *S,
// value comes from a macro, but we can do much better. This is likely
// to be over conservative. This logic is factored into a separate function
// so that we can refine it later.
- SourceLocation L = S->getLocStart();
+ SourceLocation L = S->getBeginLoc();
if (L.isMacroID()) {
SourceManager &SM = PP.getSourceManager();
if (IgnoreYES_NO) {
@@ -200,7 +200,7 @@ static bool isConfigurationValue(const Stmt *S,
// Special case looking for the sigil '()' around an integer literal.
if (const ParenExpr *PE = dyn_cast<ParenExpr>(S))
- if (!PE->getLocStart().isMacroID())
+ if (!PE->getBeginLoc().isMacroID())
return isConfigurationValue(PE->getSubExpr(), PP, SilenceableCondVal,
IncludeIntegers, true);
@@ -219,7 +219,7 @@ static bool isConfigurationValue(const Stmt *S,
return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
case Stmt::ObjCBoolLiteralExprClass:
IgnoreYES_NO = true;
- // Fallthrough.
+ LLVM_FALLTHROUGH;
case Stmt::CXXBoolLiteralExprClass:
case Stmt::IntegerLiteralClass: {
const Expr *E = cast<Expr>(S);
@@ -446,7 +446,7 @@ bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
}
static bool isValidDeadStmt(const Stmt *S) {
- if (S->getLocStart().isInvalid())
+ if (S->getBeginLoc().isInvalid())
return false;
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S))
return BO->getOpcode() != BO_Comma;
@@ -474,9 +474,9 @@ const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
static int SrcCmp(const std::pair<const CFGBlock *, const Stmt *> *p1,
const std::pair<const CFGBlock *, const Stmt *> *p2) {
- if (p1->second->getLocStart() < p2->second->getLocStart())
+ if (p1->second->getBeginLoc() < p2->second->getBeginLoc())
return -1;
- if (p2->second->getLocStart() < p1->second->getLocStart())
+ if (p2->second->getBeginLoc() < p1->second->getBeginLoc())
return 1;
return 0;
}
@@ -509,7 +509,7 @@ unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
}
// Specially handle macro-expanded code.
- if (S->getLocStart().isMacroID()) {
+ if (S->getBeginLoc().isMacroID()) {
count += scanMaybeReachableFromBlock(Block, PP, Reachable);
continue;
}
@@ -592,7 +592,7 @@ static SourceLocation GetUnreachableLoc(const Stmt *S,
case Expr::CXXFunctionalCastExprClass: {
const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
R1 = CE->getSubExpr()->getSourceRange();
- return CE->getLocStart();
+ return CE->getBeginLoc();
}
case Stmt::CXXTryStmtClass: {
return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
@@ -605,7 +605,7 @@ static SourceLocation GetUnreachableLoc(const Stmt *S,
default: ;
}
R1 = S->getSourceRange();
- return S->getLocStart();
+ return S->getBeginLoc();
}
void DeadCodeScan::reportDeadCode(const CFGBlock *B,
@@ -631,12 +631,12 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
// a for/for-range loop. This is the block that contains
// the increment code.
if (const Stmt *LoopTarget = B->getLoopTarget()) {
- SourceLocation Loc = LoopTarget->getLocStart();
+ SourceLocation Loc = LoopTarget->getBeginLoc();
SourceRange R1(Loc, Loc), R2;
if (const ForStmt *FS = dyn_cast<ForStmt>(LoopTarget)) {
const Expr *Inc = FS->getInc();
- Loc = Inc->getLocStart();
+ Loc = Inc->getBeginLoc();
R2 = Inc->getSourceRange();
}
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 03cc234dce5c..78e1b056e1d7 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -33,6 +33,7 @@
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
@@ -41,6 +42,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -64,13 +66,6 @@ using namespace threadSafety;
// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;
-namespace {
-
-class TILPrinter :
- public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
-
-} // namespace
-
/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
const Expr *MutexExp, const NamedDecl *D,
@@ -86,8 +81,8 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler,
namespace {
-/// A set of CapabilityInfo objects, which are compiled from the
-/// requires attributes on a function.
+/// A set of CapabilityExpr objects, which are compiled from thread safety
+/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
/// Push M onto list, but discard duplicates.
@@ -142,13 +137,16 @@ public:
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const = 0;
+ virtual void handleLock(FactSet &FSet, FactManager &FactMan,
+ const FactEntry &entry, ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const = 0;
virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const = 0;
// Return true if LKind >= LK, where exclusive > shared
- bool isAtLeast(LockKind LK) {
+ bool isAtLeast(LockKind LK) const {
return (LKind == LK_Exclusive) || (LK == LK_Shared);
}
};
@@ -159,7 +157,7 @@ using FactID = unsigned short;
/// the analysis of a single routine.
class FactManager {
private:
- std::vector<std::unique_ptr<FactEntry>> Facts;
+ std::vector<std::unique_ptr<const FactEntry>> Facts;
public:
FactID newFact(std::unique_ptr<FactEntry> Entry) {
@@ -168,7 +166,6 @@ public:
}
const FactEntry &operator[](FactID F) const { return *Facts[F]; }
- FactEntry &operator[](FactID F) { return *Facts[F]; }
};
/// A FactSet is the set of facts that are known to be true at a
@@ -238,22 +235,23 @@ public:
});
}
- FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
+ const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) {
return FM[ID].matches(CapE);
});
return I != end() ? &FM[*I] : nullptr;
}
- FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
+ const FactEntry *findLockUniv(FactManager &FM,
+ const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
return FM[ID].matchesUniv(CapE);
});
return I != end() ? &FM[*I] : nullptr;
}
- FactEntry *findPartialMatch(FactManager &FM,
- const CapabilityExpr &CapE) const {
+ const FactEntry *findPartialMatch(FactManager &FM,
+ const CapabilityExpr &CapE) const {
auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
return FM[ID].partiallyMatches(CapE);
});
@@ -419,7 +417,7 @@ private:
Context::Factory ContextFactory;
std::vector<VarDefinition> VarDefinitions;
std::vector<unsigned> CtxIndices;
- std::vector<std::pair<Stmt *, Context>> SavedContexts;
+ std::vector<std::pair<const Stmt *, Context>> SavedContexts;
public:
LocalVariableMap() {
@@ -460,7 +458,7 @@ public:
/// Return the next context after processing S. This function is used by
/// clients of the class to get the appropriate context when traversing the
/// CFG. It must be called for every assignment or DeclStmt.
- Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
+ Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
if (SavedContexts[CtxIndex+1].first == S) {
CtxIndex++;
Context Result = SavedContexts[CtxIndex].second;
@@ -522,7 +520,7 @@ protected:
unsigned getContextIndex() { return SavedContexts.size()-1; }
// Save the current context for later replay
- void saveContext(Stmt *S, Context C) {
+ void saveContext(const Stmt *S, Context C) {
SavedContexts.push_back(std::make_pair(S, C));
}
@@ -592,7 +590,7 @@ CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
namespace {
/// Visitor which builds a LocalVariableMap
-class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
+class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
LocalVariableMap* VMap;
LocalVariableMap::Context Ctx;
@@ -600,16 +598,16 @@ public:
VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
: VMap(VM), Ctx(C) {}
- void VisitDeclStmt(DeclStmt *S);
- void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitDeclStmt(const DeclStmt *S);
+ void VisitBinaryOperator(const BinaryOperator *BO);
};
} // namespace
// Add new local variables to the variable map
-void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
+void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
bool modifiedCtx = false;
- DeclGroupRef DGrp = S->getDeclGroup();
+ const DeclGroupRef DGrp = S->getDeclGroup();
for (const auto *D : DGrp) {
if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
const Expr *E = VD->getInit();
@@ -627,7 +625,7 @@ void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
}
// Update local variable definitions in variable map
-void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
+void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
if (!BO->isAssignmentOp())
return;
@@ -734,7 +732,7 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
CtxIndices.resize(CFGraph->getNumBlockIDs());
for (const auto *CurrBlock : *SortedGraph) {
- int CurrBlockID = CurrBlock->getBlockID();
+ unsigned CurrBlockID = CurrBlock->getBlockID();
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
VisitedBlocks.insert(CurrBlock);
@@ -750,7 +748,7 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
continue;
}
- int PrevBlockID = (*PI)->getBlockID();
+ unsigned PrevBlockID = (*PI)->getBlockID();
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
if (CtxInit) {
@@ -780,7 +778,7 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
switch (BI.getKind()) {
case CFGElement::Statement: {
CFGStmt CS = BI.castAs<CFGStmt>();
- VMapBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
+ VMapBuilder.Visit(CS.getStmt());
break;
}
default:
@@ -819,13 +817,13 @@ static void findBlockLocations(CFG *CFGraph,
// Find the source location of the last statement in the block, if the
// block is not empty.
if (const Stmt *S = CurrBlock->getTerminator()) {
- CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
} else {
for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
BE = CurrBlock->rend(); BI != BE; ++BI) {
// FIXME: Handle other CFGElement kinds.
if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
- CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
+ CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
break;
}
}
@@ -837,7 +835,7 @@ static void findBlockLocations(CFG *CFGraph,
for (const auto &BI : *CurrBlock) {
// FIXME: Handle other CFGElement kinds.
if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
- CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
+ CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
break;
}
}
@@ -873,6 +871,12 @@ public:
}
}
+ void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
+ ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ Handler.handleDoubleLock(DiagKind, entry.toString(), entry.loc());
+ }
+
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove, ThreadSafetyHandler &Handler,
@@ -887,63 +891,117 @@ public:
class ScopedLockableFactEntry : public FactEntry {
private:
- SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
+ enum UnderlyingCapabilityKind {
+ UCK_Acquired, ///< Any kind of acquired capability.
+ UCK_ReleasedShared, ///< Shared capability that was released.
+ UCK_ReleasedExclusive, ///< Exclusive capability that was released.
+ };
+
+ using UnderlyingCapability =
+ llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;
+
+ SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;
public:
- ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
- const CapExprSet &Excl, const CapExprSet &Shrd)
- : FactEntry(CE, LK_Exclusive, Loc, false) {
- for (const auto &M : Excl)
- UnderlyingMutexes.push_back(M.sexpr());
- for (const auto &M : Shrd)
- UnderlyingMutexes.push_back(M.sexpr());
+ ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
+ : FactEntry(CE, LK_Exclusive, Loc, false) {}
+
+ void addExclusiveLock(const CapabilityExpr &M) {
+ UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
+ }
+
+ void addSharedLock(const CapabilityExpr &M) {
+ UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
+ }
+
+ void addExclusiveUnlock(const CapabilityExpr &M) {
+ UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
+ }
+
+ void addSharedUnlock(const CapabilityExpr &M) {
+ UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
}
void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
- for (const auto *UnderlyingMutex : UnderlyingMutexes) {
- if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
+ for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ const auto *Entry = FSet.findLock(
+ FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
+ if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
+ (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
// If this scoped lock manages another mutex, and if the underlying
- // mutex is still held, then warn about the underlying mutex.
+ // mutex is still/not held, then warn about the underlying mutex.
Handler.handleMutexHeldEndOfScope(
- "mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
+ "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
+ LEK);
}
}
}
+ void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
+ ThreadSafetyHandler &Handler,
+ StringRef DiagKind) const override {
+ for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
+
+ if (UnderlyingMutex.getInt() == UCK_Acquired)
+ lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
+ DiagKind);
+ else
+ unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
+ }
+ }
+
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
- for (const auto *UnderlyingMutex : UnderlyingMutexes) {
- CapabilityExpr UnderCp(UnderlyingMutex, false);
- auto UnderEntry = llvm::make_unique<LockableFactEntry>(
- !UnderCp, LK_Exclusive, UnlockLoc);
-
- if (FullyRemove) {
- // We're destroying the managing object.
- // Remove the underlying mutex if it exists; but don't warn.
- if (FSet.findLock(FactMan, UnderCp)) {
- FSet.removeLock(FactMan, UnderCp);
- FSet.addLock(FactMan, std::move(UnderEntry));
- }
+ for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
+
+ // Release the underlying mutexes this scope acquired, and reacquire the
+ // ones it released. Unless we're destroying the scoped object, warn on
+ // releasing a mutex that is not held or reacquiring one that already is.
+ ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
+ if (UnderlyingMutex.getInt() == UCK_Acquired) {
+ unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
} else {
- // We're releasing the underlying mutex, but not destroying the
- // managing object. Warn on dual release.
- if (!FSet.findLock(FactMan, UnderCp)) {
- Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
- UnlockLoc);
- }
- FSet.removeLock(FactMan, UnderCp);
- FSet.addLock(FactMan, std::move(UnderEntry));
+ LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
+ ? LK_Shared
+ : LK_Exclusive;
+ lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
}
}
if (FullyRemove)
FSet.removeLock(FactMan, Cp);
}
+
+private:
+ void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
+ LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
+ StringRef DiagKind) const {
+ if (!FSet.findLock(FactMan, Cp)) {
+ FSet.removeLock(FactMan, !Cp);
+ FSet.addLock(FactMan,
+ llvm::make_unique<LockableFactEntry>(Cp, kind, loc));
+ } else if (Handler) {
+ Handler->handleDoubleLock(DiagKind, Cp.toString(), loc);
+ }
+ }
+
+ void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
+ SourceLocation loc, ThreadSafetyHandler *Handler,
+ StringRef DiagKind) const {
+ if (FSet.findLock(FactMan, Cp)) {
+ FSet.removeLock(FactMan, Cp);
+ FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
+ !Cp, LK_Exclusive, loc));
+ } else if (Handler) {
+ Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc);
+ }
+ }
};
/// Class which implements the core thread safety analysis routines.
@@ -976,11 +1034,11 @@ public:
StringRef DiagKind);
template <typename AttrType>
- void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
const NamedDecl *D, VarDecl *SelfDecl = nullptr);
template <class AttrType>
- void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
+ void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
const NamedDecl *D,
const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
Expr *BrE, bool Neg);
@@ -1232,7 +1290,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
if (!ReqAttr && !Entry->negative()) {
// look for the negative capability, and remove it from the fact set.
CapabilityExpr NegC = !*Entry;
- FactEntry *Nen = FSet.findLock(FactMan, NegC);
+ const FactEntry *Nen = FSet.findLock(FactMan, NegC);
if (Nen) {
FSet.removeLock(FactMan, NegC);
}
@@ -1251,9 +1309,9 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
}
// FIXME: Don't always warn when we have support for reentrant locks.
- if (FSet.findLock(FactMan, *Entry)) {
+ if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
- Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
+ Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
} else {
FSet.addLock(FactMan, std::move(Entry));
}
@@ -1289,7 +1347,7 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
- Expr *Exp, const NamedDecl *D,
+ const Expr *Exp, const NamedDecl *D,
VarDecl *SelfDecl) {
if (Attr->args_size() == 0) {
// The mutex held is the "this" object.
@@ -1321,7 +1379,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
- Expr *Exp, const NamedDecl *D,
+ const Expr *Exp, const NamedDecl *D,
const CFGBlock *PredBlock,
const CFGBlock *CurrBlock,
Expr *BrE, bool Neg) {
@@ -1369,14 +1427,17 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (!Cond)
return nullptr;
- if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
+ if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
+ if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
+ return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
return CallExp;
+ }
else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
- else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
- return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
+ else if (const auto *FE = dyn_cast<FullExpr>(Cond))
+ return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
@@ -1412,6 +1473,17 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (BOP->getOpcode() == BO_LOr)
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
return nullptr;
+ } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
+ bool TCond, FCond;
+ if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
+ getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
+ if (TCond && !FCond)
+ return getTrylockCallExpr(COP->getCond(), C, Negate);
+ if (!TCond && FCond) {
+ Negate = !Negate;
+ return getTrylockCallExpr(COP->getCond(), C, Negate);
+ }
+ }
}
return nullptr;
}
@@ -1426,7 +1498,8 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
Result = ExitSet;
const Stmt *Cond = PredBlock->getTerminatorCondition();
- if (!Cond)
+ // We don't acquire try-locks on ?: branches, only when the result of the
+ // ?: itself is used.
+ if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminator()))
return;
bool Negate = false;
@@ -1434,7 +1507,7 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
StringRef CapDiagKind = "mutex";
- auto *Exp = const_cast<CallExpr *>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
if (!Exp)
return;
@@ -1494,7 +1567,7 @@ namespace {
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
-class BuildLockset : public StmtVisitor<BuildLockset> {
+class BuildLockset : public ConstStmtVisitor<BuildLockset> {
friend class ThreadSafetyAnalyzer;
ThreadSafetyAnalyzer *Analyzer;
@@ -1514,19 +1587,23 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
void checkPtAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK = POK_VarAccess);
- void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
+ void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
+ void examineArguments(const FunctionDecl *FD,
+ CallExpr::const_arg_iterator ArgBegin,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool SkipFirstParam = false);
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
- : StmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
+ : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
- void VisitUnaryOperator(UnaryOperator *UO);
- void VisitBinaryOperator(BinaryOperator *BO);
- void VisitCastExpr(CastExpr *CE);
- void VisitCallExpr(CallExpr *Exp);
- void VisitCXXConstructExpr(CXXConstructExpr *Exp);
- void VisitDeclStmt(DeclStmt *S);
+ void VisitUnaryOperator(const UnaryOperator *UO);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitCastExpr(const CastExpr *CE);
+ void VisitCallExpr(const CallExpr *Exp);
+ void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
+ void VisitDeclStmt(const DeclStmt *S);
};
} // namespace
@@ -1549,7 +1626,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
if (Cp.negative()) {
// Negative capabilities act like locks excluded
- FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
+ const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
if (LDat) {
Analyzer->Handler.handleFunExcludesLock(
DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
@@ -1570,7 +1647,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
return;
}
- FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
+ const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
bool NoError = true;
if (!LDat) {
// No exact match found. Look for a partial match.
@@ -1606,7 +1683,7 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
return;
}
- FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
if (LDat) {
Analyzer->Handler.handleFunExcludesLock(
DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
@@ -1630,6 +1707,9 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
if (const auto *E = VD->getInit()) {
+ // Guard against self-initialization. e.g., int &i = i;
+ if (E == Exp)
+ break;
Exp = E;
continue;
}
@@ -1718,7 +1798,8 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
-void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
+void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
+ VarDecl *VD) {
SourceLocation Loc = Exp->getExprLoc();
CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
@@ -1858,25 +1939,32 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
if (isScopedVar) {
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
SourceLocation MLoc = VD->getLocation();
- DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ DeclRefExpr DRE(VD->getASTContext(), VD, false, VD->getType(), VK_LValue,
+ VD->getLocation());
// FIXME: does this store a pointer to DRE?
CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
- std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
- std::back_inserter(ExclusiveLocksToAdd));
- std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
- std::back_inserter(SharedLocksToAdd));
- Analyzer->addLock(FSet,
- llvm::make_unique<ScopedLockableFactEntry>(
- Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
- CapDiagKind);
+ auto ScopedEntry = llvm::make_unique<ScopedLockableFactEntry>(Scp, MLoc);
+ for (const auto &M : ExclusiveLocksToAdd)
+ ScopedEntry->addExclusiveLock(M);
+ for (const auto &M : ScopedExclusiveReqs)
+ ScopedEntry->addExclusiveLock(M);
+ for (const auto &M : SharedLocksToAdd)
+ ScopedEntry->addSharedLock(M);
+ for (const auto &M : ScopedSharedReqs)
+ ScopedEntry->addSharedLock(M);
+ for (const auto &M : ExclusiveLocksToRemove)
+ ScopedEntry->addExclusiveUnlock(M);
+ for (const auto &M : SharedLocksToRemove)
+ ScopedEntry->addSharedUnlock(M);
+ Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind);
}
}
/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
-void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
+void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
switch (UO->getOpcode()) {
case UO_PostDec:
case UO_PostInc:
@@ -1892,7 +1980,7 @@ void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
-void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
+void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
if (!BO->isAssignmentOp())
return;
@@ -1905,16 +1993,43 @@ void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
-void BuildLockset::VisitCastExpr(CastExpr *CE) {
+void BuildLockset::VisitCastExpr(const CastExpr *CE) {
if (CE->getCastKind() != CK_LValueToRValue)
return;
checkAccess(CE->getSubExpr(), AK_Read);
}
-void BuildLockset::VisitCallExpr(CallExpr *Exp) {
- bool ExamineArgs = true;
- bool OperatorFun = false;
+void BuildLockset::examineArguments(const FunctionDecl *FD,
+ CallExpr::const_arg_iterator ArgBegin,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool SkipFirstParam) {
+ // Currently we can't do anything if we don't know the function declaration.
+ if (!FD)
+ return;
+
+ // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
+ // only turns off checking within the body of a function, but we also
+ // use it to turn off checking in arguments to the function. This
+ // could result in some false negatives, but the alternative is to
+ // create yet another attribute.
+ if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
+ return;
+
+ const ArrayRef<ParmVarDecl *> Params = FD->parameters();
+ auto Param = Params.begin();
+ if (SkipFirstParam)
+ ++Param;
+ // There can be default arguments, so we stop when one iterator is at end().
+ for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
+ ++Param, ++Arg) {
+ QualType Qt = (*Param)->getType();
+ if (Qt->isReferenceType())
+ checkAccess(*Arg, AK_Read, POK_PassByRef);
+ }
+}
+
+void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
// ME can be null when calling a method pointer
@@ -1933,13 +2048,12 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
}
}
- } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
- OperatorFun = true;
+ examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
+ } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
auto OEop = OE->getOperator();
switch (OEop) {
case OO_Equal: {
- ExamineArgs = false;
const Expr *Target = OE->getArg(0);
const Expr *Source = OE->getArg(1);
checkAccess(Target, AK_Written);
@@ -1948,60 +2062,27 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
}
case OO_Star:
case OO_Arrow:
- case OO_Subscript: {
- const Expr *Obj = OE->getArg(0);
- checkAccess(Obj, AK_Read);
+ case OO_Subscript:
if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
// Grrr. operator* can be multiplication...
- checkPtAccess(Obj, AK_Read);
+ checkPtAccess(OE->getArg(0), AK_Read);
}
- break;
- }
+ LLVM_FALLTHROUGH;
default: {
// TODO: get rid of this, and rely on pass-by-ref instead.
const Expr *Obj = OE->getArg(0);
checkAccess(Obj, AK_Read);
+ // Check the remaining arguments. For method operators, the first
+ // argument is the implicit self argument, and doesn't appear in the
+ // FunctionDecl, but for non-methods it does.
+ const FunctionDecl *FD = OE->getDirectCallee();
+ examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
+ /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
break;
}
}
- }
-
- if (ExamineArgs) {
- if (FunctionDecl *FD = Exp->getDirectCallee()) {
- // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
- // only turns off checking within the body of a function, but we also
- // use it to turn off checking in arguments to the function. This
- // could result in some false negatives, but the alternative is to
- // create yet another attribute.
- if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
- unsigned Fn = FD->getNumParams();
- unsigned Cn = Exp->getNumArgs();
- unsigned Skip = 0;
-
- unsigned i = 0;
- if (OperatorFun) {
- if (isa<CXXMethodDecl>(FD)) {
- // First arg in operator call is implicit self argument,
- // and doesn't appear in the FunctionDecl.
- Skip = 1;
- Cn--;
- } else {
- // Ignore the first argument of operators; it's been checked above.
- i = 1;
- }
- }
- // Ignore default arguments
- unsigned n = (Fn < Cn) ? Fn : Cn;
-
- for (; i < n; ++i) {
- ParmVarDecl* Pvd = FD->getParamDecl(i);
- Expr* Arg = Exp->getArg(i+Skip);
- QualType Qt = Pvd->getType();
- if (Qt->isReferenceType())
- checkAccess(Arg, AK_Read, POK_PassByRef);
- }
- }
- }
+ } else {
+ examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
}
auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
@@ -2010,13 +2091,14 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
handleCall(Exp, D);
}
-void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
+void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
const CXXConstructorDecl *D = Exp->getConstructor();
if (D && D->isCopyConstructor()) {
const Expr* Source = Exp->getArg(0);
checkAccess(Source, AK_Read);
+ } else {
+ examineArguments(D, Exp->arg_begin(), Exp->arg_end());
}
- // FIXME -- only handles constructors in DeclStmt below.
}
static CXXConstructorDecl *
@@ -2046,7 +2128,7 @@ static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
SourceRange(Loc, Loc));
}
-void BuildLockset::VisitDeclStmt(DeclStmt *S) {
+void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
// adjust the context
LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
@@ -2080,7 +2162,7 @@ void BuildLockset::VisitDeclStmt(DeclStmt *S) {
CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
if (!CtorD || !CtorD->hasAttrs())
continue;
- handleCall(buildFakeCtorCall(CtorD, {E}, E->getLocStart()), CtorD, VD);
+ handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
}
}
}
@@ -2242,8 +2324,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// We must ignore such methods.
if (A->args_size() == 0)
return;
- // FIXME -- deal with exclusive vs. shared unlock functions?
- getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D);
+ getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
+ nullptr, D);
getMutexIDs(LocksReleased, A, nullptr, D);
CapDiagKind = ClassifyDiagnostic(A);
} else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
@@ -2279,7 +2361,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
for (const auto *CurrBlock : *SortedGraph) {
- int CurrBlockID = CurrBlock->getBlockID();
+ unsigned CurrBlockID = CurrBlock->getBlockID();
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
// Use the default initial lockset in case there are no predecessors.
@@ -2306,7 +2388,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
continue;
- int PrevBlockID = (*PI)->getBlockID();
+ unsigned PrevBlockID = (*PI)->getBlockID();
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
// Ignore edges from blocks that can't return.
@@ -2347,7 +2429,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// Process continue and break blocks. Assume that the lockset for the
// resulting block is unaffected by any discrepancies in them.
for (const auto *PrevBlock : SpecialBlocks) {
- int PrevBlockID = PrevBlock->getBlockID();
+ unsigned PrevBlockID = PrevBlock->getBlockID();
CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
if (!LocksetInitialized) {
@@ -2382,21 +2464,21 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
switch (BI.getKind()) {
case CFGElement::Statement: {
CFGStmt CS = BI.castAs<CFGStmt>();
- LocksetBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
+ LocksetBuilder.Visit(CS.getStmt());
break;
}
// Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
case CFGElement::AutomaticObjectDtor: {
CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
- auto *DD = const_cast<CXXDestructorDecl *>(
- AD.getDestructorDecl(AC.getASTContext()));
+ const auto *DD = AD.getDestructorDecl(AC.getASTContext());
if (!DD->hasAttrs())
break;
// Create a dummy expression,
auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
- DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
- VK_LValue, AD.getTriggerStmt()->getLocEnd());
+ DeclRefExpr DRE(VD->getASTContext(), VD, false,
+ VD->getType().getNonReferenceType(), VK_LValue,
+ AD.getTriggerStmt()->getEndLoc());
LocksetBuilder.handleCall(&DRE, DD);
break;
}
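
For context, the ScopedLockableFactEntry changes above let the analysis track underlying capabilities that a scoped object releases on construction and reacquires on destruction, and getTrylockCallExpr now sees through __builtin_expect around a try-lock call. A minimal sketch of the kind of code this supports, written against the documented thread safety attributes (the Mutex and MutexUnlocker names below are illustrative, not taken from the patch):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
  bool TryLock() __attribute__((try_acquire_capability(true)));
};

// Scope guard that releases an already-held mutex and reacquires it on exit.
class __attribute__((scoped_lockable)) MutexUnlocker {
  Mutex *mut;
public:
  explicit MutexUnlocker(Mutex *mu) __attribute__((release_capability(mu)))
      : mut(mu) { mu->Unlock(); }
  ~MutexUnlocker() __attribute__((acquire_capability(mut))) { mut->Lock(); }
};

Mutex mu;
int data __attribute__((guarded_by(mu)));

void f() __attribute__((requires_capability(mu))) {
  MutexUnlocker u(&mu);  // underlying mutex recorded as released by the scope
  // data = 1;           // would warn here: 'mu' is no longer held
}                        // 'mu' is reacquired, so f's requirement holds on return

void g() {
  if (__builtin_expect(mu.TryLock(), true)) {  // recognized as a try-lock branch
    data = 1;
    mu.Unlock();
  }
}
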
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index fced17ff9197..14d1d9c7a8f7 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -128,7 +128,8 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// Hack to handle constructors, where self cannot be recovered from
// the expression.
if (SelfDecl && !Ctx.SelfArg) {
- DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ DeclRefExpr SelfDRE(SelfDecl->getASTContext(), SelfDecl, false,
+ SelfDecl->getType(), VK_LValue,
SelfDecl->getLocation());
Ctx.SelfArg = &SelfDRE;
@@ -211,6 +212,8 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
return translateCXXThisExpr(cast<CXXThisExpr>(S), Ctx);
case Stmt::MemberExprClass:
return translateMemberExpr(cast<MemberExpr>(S), Ctx);
+ case Stmt::ObjCIvarRefExprClass:
+ return translateObjCIVarRefExpr(cast<ObjCIvarRefExpr>(S), Ctx);
case Stmt::CallExprClass:
return translateCallExpr(cast<CallExpr>(S), Ctx);
case Stmt::CXXMemberCallExprClass:
@@ -233,6 +236,8 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
cast<BinaryConditionalOperator>(S), Ctx);
// We treat these as no-ops
+ case Stmt::ConstantExprClass:
+ return translate(cast<ConstantExpr>(S)->getSubExpr(), Ctx);
case Stmt::ParenExprClass:
return translate(cast<ParenExpr>(S)->getSubExpr(), Ctx);
case Stmt::ExprWithCleanupsClass:
@@ -311,9 +316,9 @@ static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
return nullptr;
}
-static bool hasCppPointerType(const til::SExpr *E) {
+static bool hasAnyPointerType(const til::SExpr *E) {
auto *VD = getValueDeclFromSExpr(E);
- if (VD && VD->getType()->isPointerType())
+ if (VD && VD->getType()->isAnyPointerType())
return true;
if (const auto *C = dyn_cast<til::Cast>(E))
return C->castOpcode() == til::CAST_objToPtr;
@@ -344,7 +349,20 @@ til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
D = getFirstVirtualDecl(VD);
til::Project *P = new (Arena) til::Project(E, D);
- if (hasCppPointerType(BE))
+ if (hasAnyPointerType(BE))
+ P->setArrow(true);
+ return P;
+}
+
+til::SExpr *SExprBuilder::translateObjCIVarRefExpr(const ObjCIvarRefExpr *IVRE,
+ CallingContext *Ctx) {
+ til::SExpr *BE = translate(IVRE->getBase(), Ctx);
+ til::SExpr *E = new (Arena) til::SApply(BE);
+
+ const auto *D = cast<ObjCIvarDecl>(IVRE->getDecl()->getCanonicalDecl());
+
+ til::Project *P = new (Arena) til::Project(E, D);
+ if (hasAnyPointerType(BE))
P->setArrow(true);
return P;
}
@@ -354,15 +372,17 @@ til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
const Expr *SelfE) {
if (CapabilityExprMode) {
// Handle LOCK_RETURNED
- const FunctionDecl *FD = CE->getDirectCallee()->getMostRecentDecl();
- if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
- CallingContext LRCallCtx(Ctx);
- LRCallCtx.AttrDecl = CE->getDirectCallee();
- LRCallCtx.SelfArg = SelfE;
- LRCallCtx.NumArgs = CE->getNumArgs();
- LRCallCtx.FunArgs = CE->getArgs();
- return const_cast<til::SExpr *>(
- translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
+ if (const FunctionDecl *FD = CE->getDirectCallee()) {
+ FD = FD->getMostRecentDecl();
+ if (LockReturnedAttr *At = FD->getAttr<LockReturnedAttr>()) {
+ CallingContext LRCallCtx(Ctx);
+ LRCallCtx.AttrDecl = CE->getDirectCallee();
+ LRCallCtx.SelfArg = SelfE;
+ LRCallCtx.NumArgs = CE->getNumArgs();
+ LRCallCtx.FunArgs = CE->getArgs();
+ return const_cast<til::SExpr *>(
+ translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
+ }
}
}
@@ -927,6 +947,16 @@ void SExprBuilder::exitCFG(const CFGBlock *Last) {
}
/*
+namespace {
+
+class TILPrinter :
+ public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+
+} // namespace
+
+namespace clang {
+namespace threadSafety {
+
void printSCFG(CFGWalker &Walker) {
llvm::BumpPtrAllocator Bpa;
til::MemRegionRef Arena(&Bpa);
@@ -934,4 +964,7 @@ void printSCFG(CFGWalker &Walker) {
til::SCFG *Scfg = SxBuilder.buildCFG(Walker);
TILPrinter::print(Scfg, llvm::errs());
}
+
+} // namespace threadSafety
+} // namespace clang
*/
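
The translateCallExpr change above only consults LOCK_RETURNED when the call has a direct callee, so calls through function pointers no longer reach a null dereference on that path. A short sketch of the attribute that path resolves (names are placeholders, and the Mutex type is assumed to be annotated as in the previous sketch):

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

Mutex internal_mu;
int counter __attribute__((guarded_by(internal_mu)));

Mutex *GetMu() __attribute__((lock_returned(internal_mu))) {
  return &internal_mu;
}

void bump() {
  GetMu()->Lock();    // resolved to internal_mu via lock_returned
  ++counter;
  GetMu()->Unlock();
}
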
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index 798bbfb29d7b..11f7afbd229c 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -150,7 +150,7 @@ void til::simplifyIncompleteArg(til::Phi *Ph) {
}
// Renumbers the arguments and instructions to have unique, sequential IDs.
-int BasicBlock::renumberInstrs(int ID) {
+unsigned BasicBlock::renumberInstrs(unsigned ID) {
for (auto *Arg : Args)
Arg->setID(this, ID++);
for (auto *Instr : Instrs)
@@ -163,7 +163,8 @@ int BasicBlock::renumberInstrs(int ID) {
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
-int BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks, int ID) {
+unsigned BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks,
+ unsigned ID) {
if (Visited) return ID;
Visited = true;
for (auto *Block : successors())
@@ -186,7 +187,8 @@ int BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks, int ID) {
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
-int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+unsigned BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock *> &Blocks,
+ unsigned ID) {
// Visited is assumed to have been set by the topologicalSort. This pass
// assumes !Visited means that we've visited this node before.
if (!Visited) return ID;
@@ -257,7 +259,7 @@ void BasicBlock::computePostDominator() {
// Renumber instructions in all blocks
void SCFG::renumberInstrs() {
- int InstrID = 0;
+ unsigned InstrID = 0;
for (auto *Block : Blocks)
InstrID = Block->renumberInstrs(InstrID);
}
@@ -288,11 +290,11 @@ static inline void computeNodeID(BasicBlock *B,
// 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
// Topologically sort the blocks starting from the entry block.
- int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
+ unsigned NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
if (NumUnreachableBlocks > 0) {
// If there were unreachable blocks shift everything down, and delete them.
- for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
- size_t NI = I - NumUnreachableBlocks;
+ for (unsigned I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
+ unsigned NI = I - NumUnreachableBlocks;
Blocks[NI] = Blocks[I];
Blocks[NI]->BlockID = NI;
// FIXME: clean up predecessor pointers to unreachable blocks?
@@ -305,7 +307,7 @@ void SCFG::computeNormalForm() {
Block->computeDominator();
// Once dominators have been computed, the final sort may be performed.
- int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
+ unsigned NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
assert(static_cast<size_t>(NumBlocks) == Blocks.size());
(void) NumBlocks;
diff --git a/lib/Basic/Attributes.cpp b/lib/Basic/Attributes.cpp
index b7570d03c85a..9a8eb3d932cc 100644
--- a/lib/Basic/Attributes.cpp
+++ b/lib/Basic/Attributes.cpp
@@ -12,9 +12,16 @@ int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
Name = Name.substr(2, Name.size() - 4);
+ // Normalize the scope name, but only for gnu and clang attributes.
+ StringRef ScopeName = Scope ? Scope->getName() : "";
+ if (ScopeName == "__gnu__")
+ ScopeName = "gnu";
+ else if (ScopeName == "_Clang")
+ ScopeName = "clang";
+
#include "clang/Basic/AttrHasAttributeImpl.inc"
- return 0;
+ return 0;
}
const char *attr::getSubjectMatchRuleSpelling(attr::SubjectMatchRule Rule) {
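
The hasAttribute change above normalizes the alternative scope tokens __gnu__ and _Clang to gnu and clang before the generated attribute table is consulted, so feature tests using the fully underscored spellings behave like the plain ones. A small C++ illustration (the PURE_FN macro name is just an example):

#if __has_cpp_attribute(__gnu__::__const__)   // now matches like gnu::const
#  define PURE_FN [[__gnu__::__const__]]
#else
#  define PURE_FN
#endif

PURE_FN int answer();
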
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index a3210ba09068..7e7f67ca874e 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -68,7 +68,7 @@ bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
bool GnuModeUnsupported = !LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG);
bool MSModeUnsupported =
!LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG);
- bool ObjCUnsupported = !LangOpts.ObjC1 && BuiltinInfo.Langs == OBJC_LANG;
+ bool ObjCUnsupported = !LangOpts.ObjC && BuiltinInfo.Langs == OBJC_LANG;
bool OclC1Unsupported = (LangOpts.OpenCLVersion / 100) != 1 &&
(BuiltinInfo.Langs & ALL_OCLC_LANGUAGES ) == OCLC1X_LANG;
bool OclC2Unsupported = LangOpts.OpenCLVersion != 200 &&
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index e82f451dea17..8b3aa0f1e972 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -48,12 +48,14 @@ add_clang_library(clangBasic
Attributes.cpp
Builtins.cpp
CharInfo.cpp
+ CodeGenOptions.cpp
Cuda.cpp
Diagnostic.cpp
DiagnosticIDs.cpp
DiagnosticOptions.cpp
FileManager.cpp
FileSystemStatCache.cpp
+ FixedPoint.cpp
IdentifierTable.cpp
LangOptions.cpp
MemoryBufferCache.cpp
@@ -70,6 +72,7 @@ add_clang_library(clangBasic
Targets.cpp
Targets/AArch64.cpp
Targets/AMDGPU.cpp
+ Targets/ARC.cpp
Targets/ARM.cpp
Targets/AVR.cpp
Targets/BPF.cpp
@@ -79,7 +82,6 @@ add_clang_library(clangBasic
Targets/MSP430.cpp
Targets/Mips.cpp
Targets/NVPTX.cpp
- Targets/Nios2.cpp
Targets/OSTargets.cpp
Targets/PNaCl.cpp
Targets/PPC.cpp
@@ -93,7 +95,6 @@ add_clang_library(clangBasic
Targets/XCore.cpp
TokenKinds.cpp
Version.cpp
- VirtualFileSystem.cpp
Warnings.cpp
XRayInstr.cpp
XRayLists.cpp
diff --git a/lib/Frontend/CodeGenOptions.cpp b/lib/Basic/CodeGenOptions.cpp
index 84a39f2d570d..aface1cd4bf9 100644
--- a/lib/Frontend/CodeGenOptions.cpp
+++ b/lib/Basic/CodeGenOptions.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include <string.h>
namespace clang {
@@ -15,7 +15,7 @@ namespace clang {
CodeGenOptions::CodeGenOptions() {
#define CODEGENOPT(Name, Bits, Default) Name = Default;
#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
-#include "clang/Frontend/CodeGenOptions.def"
+#include "clang/Basic/CodeGenOptions.def"
RelocationModel = llvm::Reloc::PIC_;
memcpy(CoverageVersion, "402*", 4);
diff --git a/lib/Basic/Cuda.cpp b/lib/Basic/Cuda.cpp
index dc7e61c02b24..6c34856dfdf7 100644
--- a/lib/Basic/Cuda.cpp
+++ b/lib/Basic/Cuda.cpp
@@ -22,6 +22,8 @@ const char *CudaVersionToString(CudaVersion V) {
return "9.1";
case CudaVersion::CUDA_92:
return "9.2";
+ case CudaVersion::CUDA_100:
+ return "10.0";
}
llvm_unreachable("invalid enum");
}
@@ -60,6 +62,8 @@ const char *CudaArchToString(CudaArch A) {
return "sm_70";
case CudaArch::SM_72:
return "sm_72";
+ case CudaArch::SM_75:
+ return "sm_75";
case CudaArch::GFX600: // tahiti
return "gfx600";
case CudaArch::GFX601: // pitcairn, verde, oland,hainan
@@ -86,6 +90,12 @@ const char *CudaArchToString(CudaArch A) {
return "gfx900";
case CudaArch::GFX902: // TBA
return "gfx902";
+ case CudaArch::GFX904: // TBA
+ return "gfx904";
+ case CudaArch::GFX906: // TBA
+ return "gfx906";
+ case CudaArch::GFX909: // TBA
+ return "gfx909";
}
llvm_unreachable("invalid enum");
}
@@ -106,6 +116,7 @@ CudaArch StringToCudaArch(llvm::StringRef S) {
.Case("sm_62", CudaArch::SM_62)
.Case("sm_70", CudaArch::SM_70)
.Case("sm_72", CudaArch::SM_72)
+ .Case("sm_75", CudaArch::SM_75)
.Case("gfx600", CudaArch::GFX600)
.Case("gfx601", CudaArch::GFX601)
.Case("gfx700", CudaArch::GFX700)
@@ -119,6 +130,9 @@ CudaArch StringToCudaArch(llvm::StringRef S) {
.Case("gfx810", CudaArch::GFX810)
.Case("gfx900", CudaArch::GFX900)
.Case("gfx902", CudaArch::GFX902)
+ .Case("gfx904", CudaArch::GFX904)
+ .Case("gfx906", CudaArch::GFX906)
+ .Case("gfx909", CudaArch::GFX909)
.Default(CudaArch::UNKNOWN);
}
@@ -152,6 +166,8 @@ const char *CudaVirtualArchToString(CudaVirtualArch A) {
return "compute_70";
case CudaVirtualArch::COMPUTE_72:
return "compute_72";
+ case CudaVirtualArch::COMPUTE_75:
+ return "compute_75";
case CudaVirtualArch::COMPUTE_AMDGCN:
return "compute_amdgcn";
}
@@ -173,6 +189,7 @@ CudaVirtualArch StringToCudaVirtualArch(llvm::StringRef S) {
.Case("compute_62", CudaVirtualArch::COMPUTE_62)
.Case("compute_70", CudaVirtualArch::COMPUTE_70)
.Case("compute_72", CudaVirtualArch::COMPUTE_72)
+ .Case("compute_75", CudaVirtualArch::COMPUTE_75)
.Case("compute_amdgcn", CudaVirtualArch::COMPUTE_AMDGCN)
.Default(CudaVirtualArch::UNKNOWN);
}
@@ -210,6 +227,8 @@ CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
return CudaVirtualArch::COMPUTE_70;
case CudaArch::SM_72:
return CudaVirtualArch::COMPUTE_72;
+ case CudaArch::SM_75:
+ return CudaVirtualArch::COMPUTE_75;
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX700:
@@ -223,6 +242,9 @@ CudaVirtualArch VirtualArchForCudaArch(CudaArch A) {
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
return CudaVirtualArch::COMPUTE_AMDGCN;
}
llvm_unreachable("invalid enum");
@@ -252,6 +274,8 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_90;
case CudaArch::SM_72:
return CudaVersion::CUDA_91;
+ case CudaArch::SM_75:
+ return CudaVersion::CUDA_100;
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX700:
@@ -265,6 +289,9 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
return CudaVersion::CUDA_70;
}
llvm_unreachable("invalid enum");
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index f1ebd9d38b9e..56c54cb9070c 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -89,6 +89,14 @@ DiagnosticsEngine::~DiagnosticsEngine() {
setClient(nullptr);
}
+void DiagnosticsEngine::dump() const {
+ DiagStatesByLoc.dump(*SourceMgr);
+}
+
+void DiagnosticsEngine::dump(StringRef DiagName) const {
+ DiagStatesByLoc.dump(*SourceMgr, DiagName);
+}
+
void DiagnosticsEngine::setClient(DiagnosticConsumer *client,
bool ShouldOwnClient) {
Owner.reset(ShouldOwnClient ? client : nullptr);
@@ -239,7 +247,7 @@ DiagnosticsEngine::DiagStateMap::getFile(SourceManager &SrcMgr,
void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr,
StringRef DiagName) const {
llvm::errs() << "diagnostic state at ";
- CurDiagStateLoc.dump(SrcMgr);
+ CurDiagStateLoc.print(llvm::errs(), SrcMgr);
llvm::errs() << ": " << CurDiagState << "\n";
for (auto &F : Files) {
@@ -261,7 +269,7 @@ void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr,
<< Decomp.first.getHashValue() << "> ";
SrcMgr.getLocForStartOfFile(Decomp.first)
.getLocWithOffset(Decomp.second)
- .dump(SrcMgr);
+ .print(llvm::errs(), SrcMgr);
}
if (File.HasLocalTransitions)
llvm::errs() << " has_local_transitions";
@@ -281,7 +289,7 @@ void DiagnosticsEngine::DiagStateMap::dump(SourceManager &SrcMgr,
llvm::errs() << " ";
SrcMgr.getLocForStartOfFile(ID)
.getLocWithOffset(Transition.Offset)
- .dump(SrcMgr);
+ .print(llvm::errs(), SrcMgr);
llvm::errs() << ": state " << Transition.State << ":\n";
};
@@ -975,6 +983,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
break;
}
+ case DiagnosticsEngine::ak_qual:
case DiagnosticsEngine::ak_qualtype:
case DiagnosticsEngine::ak_declarationname:
case DiagnosticsEngine::ak_nameddecl:
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index d339b972ae8e..f5a2d4894c13 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -49,7 +49,7 @@ using namespace clang;
//===----------------------------------------------------------------------===//
FileManager::FileManager(const FileSystemOptions &FSO,
- IntrusiveRefCntPtr<vfs::FileSystem> FS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
: FS(std::move(FS)), FileSystemOpts(FSO), SeenDirEntries(64),
SeenFileEntries(64), NextFileUID(0) {
NumDirLookups = NumFileLookups = 0;
@@ -58,49 +58,17 @@ FileManager::FileManager(const FileSystemOptions &FSO,
// If the caller doesn't provide a virtual file system, just grab the real
// file system.
if (!this->FS)
- this->FS = vfs::getRealFileSystem();
+ this->FS = llvm::vfs::getRealFileSystem();
}
FileManager::~FileManager() = default;
-void FileManager::addStatCache(std::unique_ptr<FileSystemStatCache> statCache,
- bool AtBeginning) {
+void FileManager::setStatCache(std::unique_ptr<FileSystemStatCache> statCache) {
assert(statCache && "No stat cache provided?");
- if (AtBeginning || !StatCache.get()) {
- statCache->setNextStatCache(std::move(StatCache));
- StatCache = std::move(statCache);
- return;
- }
-
- FileSystemStatCache *LastCache = StatCache.get();
- while (LastCache->getNextStatCache())
- LastCache = LastCache->getNextStatCache();
-
- LastCache->setNextStatCache(std::move(statCache));
-}
-
-void FileManager::removeStatCache(FileSystemStatCache *statCache) {
- if (!statCache)
- return;
-
- if (StatCache.get() == statCache) {
- // This is the first stat cache.
- StatCache = StatCache->takeNextStatCache();
- return;
- }
-
- // Find the stat cache in the list.
- FileSystemStatCache *PrevCache = StatCache.get();
- while (PrevCache && PrevCache->getNextStatCache() != statCache)
- PrevCache = PrevCache->getNextStatCache();
-
- assert(PrevCache && "Stat cache not found for removal");
- PrevCache->setNextStatCache(statCache->takeNextStatCache());
+ StatCache = std::move(statCache);
}
-void FileManager::clearStatCaches() {
- StatCache.reset();
-}
+void FileManager::clearStatCache() { StatCache.reset(); }
/// Retrieve the directory that the given file name resides in.
/// Filename can point to either a real file or a virtual file.
@@ -221,15 +189,21 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
*SeenFileEntries.insert(std::make_pair(Filename, nullptr)).first;
// See if there is already an entry in the map.
- if (NamedFileEnt.second)
- return NamedFileEnt.second == NON_EXISTENT_FILE ? nullptr
- : NamedFileEnt.second;
+ if (NamedFileEnt.second) {
+ if (NamedFileEnt.second == NON_EXISTENT_FILE)
+ return nullptr;
+ // Entry exists: return it *unless* it wasn't opened and open is requested.
+ if (!(NamedFileEnt.second->DeferredOpen && openFile))
+ return NamedFileEnt.second;
+ // We previously stat()ed the file, but didn't open it: do that below.
+ // FIXME: the below does other redundant work too (stats the dir and file).
+ } else {
+ // By default, initialize it to invalid.
+ NamedFileEnt.second = NON_EXISTENT_FILE;
+ }
++NumFileCacheMisses;
- // By default, initialize it to invalid.
- NamedFileEnt.second = NON_EXISTENT_FILE;
-
// Get the null-terminated file name as stored as the key of the
// SeenFileEntries map.
StringRef InterndFileName = NamedFileEnt.first();
@@ -252,7 +226,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// FIXME: This will reduce the # syscalls.
// Nope, there isn't. Check to see if the file exists.
- std::unique_ptr<vfs::File> F;
+ std::unique_ptr<llvm::vfs::File> F;
FileData Data;
if (getStatValue(InterndFileName, Data, true, openFile ? &F : nullptr)) {
// There's no real file at the given path.
@@ -267,6 +241,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
FileEntry &UFE = UniqueRealFiles[Data.UniqueID];
+ UFE.DeferredOpen = !openFile;
NamedFileEnt.second = &UFE;
@@ -283,6 +258,15 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
InterndFileName = NamedFileEnt.first().data();
}
+ // If we opened the file for the first time, record the resulting info.
+ // Do this even if the cache entry was valid, maybe we didn't previously open.
+ if (F && !UFE.File) {
+ if (auto PathName = F->getName())
+ fillRealPathName(&UFE, *PathName);
+ UFE.File = std::move(F);
+ assert(!UFE.DeferredOpen && "we just opened it!");
+ }
+
if (UFE.isValid()) { // Already have an entry with this inode, return it.
// FIXME: this hack ensures that if we look up a file by a virtual path in
@@ -313,11 +297,9 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
UFE.UniqueID = Data.UniqueID;
UFE.IsNamedPipe = Data.IsNamedPipe;
UFE.InPCH = Data.InPCH;
- UFE.File = std::move(F);
UFE.IsValid = true;
- if (UFE.File)
- if (auto RealPathName = UFE.File->getName())
- UFE.RealPathName = *RealPathName;
+ // Note File and DeferredOpen were initialized above.
+
return &UFE;
}
@@ -373,6 +355,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->UniqueID = Data.UniqueID;
UFE->IsNamedPipe = Data.IsNamedPipe;
UFE->InPCH = Data.InPCH;
+ fillRealPathName(UFE, Data.Name);
}
if (!UFE) {
@@ -388,6 +371,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->UID = NextFileUID++;
UFE->IsValid = true;
UFE->File.reset();
+ UFE->DeferredOpen = false;
return UFE;
}
@@ -415,6 +399,17 @@ bool FileManager::makeAbsolutePath(SmallVectorImpl<char> &Path) const {
return Changed;
}
+void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
+ llvm::SmallString<128> AbsPath(FileName);
+ // This is not the same as `VFS::getRealPath()`, which resolves symlinks
+ // but can be very expensive on real file systems.
+ // FIXME: the semantic of RealPathName is unclear, and the name might be
+ // misleading. We need to clean up the interface here.
+ makeAbsolutePath(AbsPath);
+ llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
+ UFE->RealPathName = AbsPath.str();
+}
+
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
bool ShouldCloseOpenFile) {
@@ -465,7 +460,7 @@ FileManager::getBufferForFile(StringRef Filename, bool isVolatile) {
/// false if it's an existent real file. If FileDescriptor is NULL,
/// do directory look-up instead of file look-up.
bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F) {
+ std::unique_ptr<llvm::vfs::File> *F) {
// FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
// absolute!
if (FileSystemOpts.WorkingDir.empty())
@@ -479,11 +474,11 @@ bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
}
bool FileManager::getNoncachedStatValue(StringRef Path,
- vfs::Status &Result) {
+ llvm::vfs::Status &Result) {
SmallString<128> FilePath(Path);
FixupRelativePath(FilePath);
- llvm::ErrorOr<vfs::Status> S = FS->status(FilePath.c_str());
+ llvm::ErrorOr<llvm::vfs::Status> S = FS->status(FilePath.c_str());
if (!S)
return true;
Result = *S;
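
With the DeferredOpen flag above, a FileEntry created by a stat-only lookup can be opened later when the same file is requested again with openFile=true, instead of being returned unopened from the cache. A rough usage sketch against this FileManager interface (the path "input.c" is a placeholder):

#include "clang/Basic/FileManager.h"

void demo(clang::FileManager &FM) {
  // First lookup only stats the file; the cached entry has DeferredOpen set.
  const clang::FileEntry *FE = FM.getFile("input.c", /*openFile=*/false);
  // Second lookup with openFile=true now re-enters the lookup and opens the
  // file, rather than returning the cached, unopened entry as before.
  const clang::FileEntry *Opened = FM.getFile("input.c", /*openFile=*/true);
  (void)FE; (void)Opened;  // both point at the same FileEntry
}
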
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index f5856cb6542a..6f2eef4e2062 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -12,17 +12,17 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <utility>
using namespace clang;
void FileSystemStatCache::anchor() {}
-static void copyStatusToFileData(const vfs::Status &Status,
+static void copyStatusToFileData(const llvm::vfs::Status &Status,
FileData &Data) {
Data.Name = Status.getName();
Data.Size = Status.getSize();
@@ -44,8 +44,9 @@ static void copyStatusToFileData(const vfs::Status &Status,
/// implementation can optionally fill in FileDescriptor with a valid
/// descriptor and the client guarantees that it will close it.
bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- FileSystemStatCache *Cache, vfs::FileSystem &FS) {
+ std::unique_ptr<llvm::vfs::File> *F,
+ FileSystemStatCache *Cache,
+ llvm::vfs::FileSystem &FS) {
LookupResult R;
bool isForDir = !isFile;
@@ -55,7 +56,7 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
else if (isForDir || !F) {
// If this is a directory or a file descriptor is not needed and we have
// no cache, just go to the file system.
- llvm::ErrorOr<vfs::Status> Status = FS.status(Path);
+ llvm::ErrorOr<llvm::vfs::Status> Status = FS.status(Path);
if (!Status) {
R = CacheMissing;
} else {
@@ -79,7 +80,7 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
// Otherwise, the open succeeded. Do an fstat to get the information
// about the file. We'll end up returning the open file descriptor to the
// client to do what they please with it.
- llvm::ErrorOr<vfs::Status> Status = (*OwnedFile)->status();
+ llvm::ErrorOr<llvm::vfs::Status> Status = (*OwnedFile)->status();
if (Status) {
R = CacheExists;
copyStatusToFileData(*Status, Data);
@@ -111,19 +112,19 @@ bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
MemorizeStatCalls::LookupResult
MemorizeStatCalls::getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F, vfs::FileSystem &FS) {
- LookupResult Result = statChained(Path, Data, isFile, F, FS);
-
- // Do not cache failed stats, it is easy to construct common inconsistent
- // situations if we do, and they are not important for PCH performance (which
- // currently only needs the stats to construct the initial FileManager
- // entries).
- if (Result == CacheMissing)
- return Result;
+ std::unique_ptr<llvm::vfs::File> *F,
+ llvm::vfs::FileSystem &FS) {
+ if (get(Path, Data, isFile, F, nullptr, FS)) {
+ // Do not cache failed stats, it is easy to construct common inconsistent
+ // situations if we do, and they are not important for PCH performance
+ // (which currently only needs the stats to construct the initial
+ // FileManager entries).
+ return CacheMissing;
+ }
// Cache file 'stat' results and directories with absolute paths.
if (!Data.IsDirectory || llvm::sys::path::is_absolute(Path))
StatCalls[Path] = Data;
- return Result;
+ return CacheExists;
}
diff --git a/lib/Basic/FixedPoint.cpp b/lib/Basic/FixedPoint.cpp
new file mode 100644
index 000000000000..bfff0fc212e0
--- /dev/null
+++ b/lib/Basic/FixedPoint.cpp
@@ -0,0 +1,115 @@
+//===- FixedPoint.cpp - Fixed point constant handling -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Defines the implementation for the fixed point number interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FixedPoint.h"
+
+namespace clang {
+
+APFixedPoint APFixedPoint::convert(const FixedPointSemantics &DstSema) const {
+ llvm::APSInt NewVal = Val;
+ unsigned DstWidth = DstSema.getWidth();
+ unsigned DstScale = DstSema.getScale();
+ bool Upscaling = DstScale > getScale();
+
+ if (Upscaling) {
+ NewVal = NewVal.extend(NewVal.getBitWidth() + DstScale - getScale());
+ NewVal <<= (DstScale - getScale());
+ } else {
+ NewVal >>= (getScale() - DstScale);
+ }
+
+ if (DstSema.isSaturated()) {
+ auto Mask = llvm::APInt::getBitsSetFrom(
+ NewVal.getBitWidth(),
+ std::min(DstScale + DstSema.getIntegralBits(), NewVal.getBitWidth()));
+ llvm::APInt Masked(NewVal & Mask);
+
+ // Change in the bits above the sign
+ if (!(Masked == Mask || Masked == 0))
+ NewVal = NewVal.isNegative() ? Mask : ~Mask;
+
+ if (!DstSema.isSigned() && NewVal.isNegative())
+ NewVal = 0;
+ }
+
+ NewVal = NewVal.extOrTrunc(DstWidth);
+ NewVal.setIsSigned(DstSema.isSigned());
+ return APFixedPoint(NewVal, DstSema);
+}
+
+int APFixedPoint::compare(const APFixedPoint &Other) const {
+ llvm::APSInt ThisVal = getValue();
+ llvm::APSInt OtherVal = Other.getValue();
+ bool ThisSigned = Val.isSigned();
+ bool OtherSigned = OtherVal.isSigned();
+ unsigned OtherScale = Other.getScale();
+ unsigned OtherWidth = OtherVal.getBitWidth();
+
+ unsigned CommonWidth = std::max(Val.getBitWidth(), OtherWidth);
+
+ // Prevent overflow in the event the widths are the same but the scales differ
+ CommonWidth += getScale() >= OtherScale ? getScale() - OtherScale
+ : OtherScale - getScale();
+
+ ThisVal = ThisVal.extOrTrunc(CommonWidth);
+ OtherVal = OtherVal.extOrTrunc(CommonWidth);
+
+ unsigned CommonScale = std::max(getScale(), OtherScale);
+ ThisVal = ThisVal.shl(CommonScale - getScale());
+ OtherVal = OtherVal.shl(CommonScale - OtherScale);
+
+ if (ThisSigned && OtherSigned) {
+ if (ThisVal.sgt(OtherVal))
+ return 1;
+ else if (ThisVal.slt(OtherVal))
+ return -1;
+ } else if (!ThisSigned && !OtherSigned) {
+ if (ThisVal.ugt(OtherVal))
+ return 1;
+ else if (ThisVal.ult(OtherVal))
+ return -1;
+ } else if (ThisSigned && !OtherSigned) {
+ if (ThisVal.isSignBitSet())
+ return -1;
+ else if (ThisVal.ugt(OtherVal))
+ return 1;
+ else if (ThisVal.ult(OtherVal))
+ return -1;
+ } else {
+ // !ThisSigned && OtherSigned
+ if (OtherVal.isSignBitSet())
+ return 1;
+ else if (ThisVal.ugt(OtherVal))
+ return 1;
+ else if (ThisVal.ult(OtherVal))
+ return -1;
+ }
+
+ return 0;
+}
+
+APFixedPoint APFixedPoint::getMax(const FixedPointSemantics &Sema) {
+ bool IsUnsigned = !Sema.isSigned();
+ auto Val = llvm::APSInt::getMaxValue(Sema.getWidth(), IsUnsigned);
+ if (IsUnsigned && Sema.hasUnsignedPadding())
+ Val = Val.lshr(1);
+ return APFixedPoint(Val, Sema);
+}
+
+APFixedPoint APFixedPoint::getMin(const FixedPointSemantics &Sema) {
+ auto Val = llvm::APSInt::getMinValue(Sema.getWidth(), !Sema.isSigned());
+ return APFixedPoint(Val, Sema);
+}
+
+} // namespace clang
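
As a worked example of the saturating convert() above, here is the upscale-then-saturate path redone with plain integers (not the APFixedPoint API): 5.75 stored as signed Q3.4 (width 8, scale 4, raw 92) converted to saturated signed Q1.6 (width 8, scale 6, one integral bit).

#include <cstdint>
#include <cstdio>

int main() {
  int64_t Raw = 92;                     // 5.75 * 2^4 in signed Q3.4
  unsigned SrcScale = 4, DstScale = 6, DstIntegralBits = 1;
  Raw <<= (DstScale - SrcScale);        // upscale: 368, now needs 10 bits
  int64_t Mask = ~((int64_t(1) << (DstScale + DstIntegralBits)) - 1);
  int64_t Masked = Raw & Mask;
  if (Masked != 0 && Masked != Mask)    // bits changed above the sign region
    Raw = (Raw < 0) ? Mask : ~Mask;     // saturate; here Raw becomes 127
  // Truncated to 8 bits this is 127, i.e. 127/2^6 = 1.984375, the Q1.6 maximum.
  std::printf("%lld -> %f\n", (long long)Raw, (double)Raw / 64.0);
}
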
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 7ec3cb7dd65b..b961c8333bd7 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -34,28 +34,6 @@
using namespace clang;
//===----------------------------------------------------------------------===//
-// IdentifierInfo Implementation
-//===----------------------------------------------------------------------===//
-
-IdentifierInfo::IdentifierInfo() {
- TokenID = tok::identifier;
- ObjCOrBuiltinID = 0;
- HasMacro = false;
- HadMacro = false;
- IsExtension = false;
- IsFutureCompatKeyword = false;
- IsPoisoned = false;
- IsCPPOperatorKeyword = false;
- NeedsHandleIdentifier = false;
- IsFromAST = false;
- ChangedAfterLoad = false;
- FEChangedAfterLoad = false;
- RevertedTokenID = false;
- OutOfDate = false;
- IsModulesImport = false;
-}
-
-//===----------------------------------------------------------------------===//
// IdentifierTable Implementation
//===----------------------------------------------------------------------===//
@@ -99,30 +77,29 @@ IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
namespace {
enum {
- KEYC99 = 0x1,
- KEYCXX = 0x2,
- KEYCXX11 = 0x4,
- KEYGNU = 0x8,
- KEYMS = 0x10,
- BOOLSUPPORT = 0x20,
- KEYALTIVEC = 0x40,
- KEYNOCXX = 0x80,
- KEYBORLAND = 0x100,
- KEYOPENCLC = 0x200,
- KEYC11 = 0x400,
- KEYARC = 0x800,
- KEYNOMS18 = 0x01000,
- KEYNOOPENCL = 0x02000,
- WCHARSUPPORT = 0x04000,
- HALFSUPPORT = 0x08000,
- CHAR8SUPPORT = 0x10000,
- KEYCONCEPTS = 0x20000,
- KEYOBJC2 = 0x40000,
- KEYZVECTOR = 0x80000,
- KEYCOROUTINES = 0x100000,
- KEYMODULES = 0x200000,
- KEYCXX2A = 0x400000,
- KEYOPENCLCXX = 0x800000,
+ KEYC99 = 0x1,
+ KEYCXX = 0x2,
+ KEYCXX11 = 0x4,
+ KEYGNU = 0x8,
+ KEYMS = 0x10,
+ BOOLSUPPORT = 0x20,
+ KEYALTIVEC = 0x40,
+ KEYNOCXX = 0x80,
+ KEYBORLAND = 0x100,
+ KEYOPENCLC = 0x200,
+ KEYC11 = 0x400,
+ KEYNOMS18 = 0x800,
+ KEYNOOPENCL = 0x1000,
+ WCHARSUPPORT = 0x2000,
+ HALFSUPPORT = 0x4000,
+ CHAR8SUPPORT = 0x8000,
+ KEYCONCEPTS = 0x10000,
+ KEYOBJC = 0x20000,
+ KEYZVECTOR = 0x40000,
+ KEYCOROUTINES = 0x80000,
+ KEYMODULES = 0x100000,
+ KEYCXX2A = 0x200000,
+ KEYOPENCLCXX = 0x400000,
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX2A,
KEYALL = (0xffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
@@ -155,6 +132,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
if (LangOpts.Char8 && (Flags & CHAR8SUPPORT)) return KS_Enabled;
if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
+ if (LangOpts.ZVector && (Flags & KEYZVECTOR)) return KS_Enabled;
if (LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLC))
return KS_Enabled;
if (LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLCXX)) return KS_Enabled;
@@ -162,8 +140,7 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
// We treat bridge casts as objective-C keywords so we can warn on them
// in non-arc mode.
- if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
- if (LangOpts.ObjC2 && (Flags & KEYOBJC2)) return KS_Enabled;
+ if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.CoroutinesTS && (Flags & KEYCOROUTINES)) return KS_Enabled;
if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
@@ -227,11 +204,8 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define CXX_KEYWORD_OPERATOR(NAME, ALIAS) \
if (LangOpts.CXXOperatorNames) \
AddCXXOperatorKeyword(StringRef(#NAME), tok::ALIAS, *this);
-#define OBJC1_AT_KEYWORD(NAME) \
- if (LangOpts.ObjC1) \
- AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
-#define OBJC2_AT_KEYWORD(NAME) \
- if (LangOpts.ObjC2) \
+#define OBJC_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
@@ -382,24 +356,23 @@ unsigned llvm::DenseMapInfo<clang::Selector>::getHashValue(clang::Selector S) {
namespace clang {
-/// MultiKeywordSelector - One of these variable length records is kept for each
+/// One of these variable length records is kept for each
/// selector containing more than one keyword. We use a folding set
/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
/// this class is provided strictly through Selector.
-class MultiKeywordSelector
- : public DeclarationNameExtra, public llvm::FoldingSetNode {
- MultiKeywordSelector(unsigned nKeys) {
- ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
- }
+class alignas(IdentifierInfoAlignment) MultiKeywordSelector
+ : public detail::DeclarationNameExtra,
+ public llvm::FoldingSetNode {
+ MultiKeywordSelector(unsigned nKeys) : DeclarationNameExtra(nKeys) {}
public:
// Constructor for keyword selectors.
- MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV)
+ : DeclarationNameExtra(nKeys) {
assert((nKeys > 1) && "not a multi-keyword selector");
- ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
// Fill in the trailing keyword array.
- IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this+1);
+ IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this + 1);
for (unsigned i = 0; i != nKeys; ++i)
KeyInfo[i] = IIV[i];
}
@@ -407,16 +380,16 @@ public:
// getName - Derive the full selector name and return it.
std::string getName() const;
- unsigned getNumArgs() const { return ExtraKindOrNumArgs - NUM_EXTRA_KINDS; }
+ using DeclarationNameExtra::getNumArgs;
using keyword_iterator = IdentifierInfo *const *;
keyword_iterator keyword_begin() const {
- return reinterpret_cast<keyword_iterator>(this+1);
+ return reinterpret_cast<keyword_iterator>(this + 1);
}
keyword_iterator keyword_end() const {
- return keyword_begin()+getNumArgs();
+ return keyword_begin() + getNumArgs();
}
IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
@@ -424,8 +397,8 @@ public:
return keyword_begin()[i];
}
- static void Profile(llvm::FoldingSetNodeID &ID,
- keyword_iterator ArgTys, unsigned NumArgs) {
+ static void Profile(llvm::FoldingSetNodeID &ID, keyword_iterator ArgTys,
+ unsigned NumArgs) {
ID.AddInteger(NumArgs);
for (unsigned i = 0; i != NumArgs; ++i)
ID.AddPointer(ArgTys[i]);
@@ -462,7 +435,7 @@ IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
StringRef Selector::getNameForSlot(unsigned int argIndex) const {
IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
- return II? II->getName() : StringRef();
+ return II ? II->getName() : StringRef();
}
std::string MultiKeywordSelector::getName() const {
@@ -584,6 +557,7 @@ ObjCInstanceTypeFamily Selector::getInstTypeMethodFamily(Selector sel) {
break;
case 'i':
if (startsWithWord(name, "init")) return OIT_Init;
+ break;
default:
break;
}
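
The keyword table above gates each keyword on a bitmask of language modes; the change collapses the old KEYOBJC2/KEYARC bits into a single KEYOBJC bit. A minimal standalone sketch of that gating, using hypothetical stand-ins for LangOptions and two of the KEY* bits shown above:

#include <cassert>

// Hypothetical stand-ins for the KEY* enumerators and LangOptions fields above.
enum KeywordFlag : unsigned {
  KEYCXX  = 0x2,
  KEYOBJC = 0x20000, // one bit now covers all Objective-C keywords
};

struct LangModes {
  bool CPlusPlus = false;
  bool ObjC = false;
};

// A keyword is enabled when any of its flag bits matches an enabled mode.
static bool keywordEnabled(const LangModes &LO, unsigned Flags) {
  if (LO.CPlusPlus && (Flags & KEYCXX))
    return true;
  if (LO.ObjC && (Flags & KEYOBJC))
    return true;
  return false;
}

int main() {
  LangModes ObjCxx;
  ObjCxx.CPlusPlus = true;
  ObjCxx.ObjC = true;
  assert(keywordEnabled(ObjCxx, KEYOBJC));       // enabled in Objective-C modes
  assert(!keywordEnabled(LangModes{}, KEYOBJC)); // disabled in plain C
  return 0;
}
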
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 1a0c19059058..fd552f2baaca 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -71,6 +71,37 @@ Module::~Module() {
}
}
+static bool isPlatformEnvironment(const TargetInfo &Target, StringRef Feature) {
+ StringRef Platform = Target.getPlatformName();
+ StringRef Env = Target.getTriple().getEnvironmentName();
+
+ // Attempt to match platform and environment.
+ if (Platform == Feature || Target.getTriple().getOSName() == Feature ||
+ Env == Feature)
+ return true;
+
+ auto CmpPlatformEnv = [](StringRef LHS, StringRef RHS) {
+ auto Pos = LHS.find("-");
+ if (Pos == StringRef::npos)
+ return false;
+ SmallString<128> NewLHS = LHS.slice(0, Pos);
+ NewLHS += LHS.slice(Pos+1, LHS.size());
+ return NewLHS == RHS;
+ };
+
+ SmallString<128> PlatformEnv = Target.getTriple().getOSAndEnvironmentName();
+  // Darwin has different but equivalent variants for simulators, for example:
+  //   1. x86_64-apple-ios-simulator
+  //   2. x86_64-apple-iossimulator
+  // Both are valid spellings of the same platform+environment, but in variant
+  // (2) the simulator is folded into the platform name. Both forms above
+  // should satisfy an "iossimulator" requirement.
+ if (Target.getTriple().isOSDarwin() && PlatformEnv.endswith("simulator"))
+ return PlatformEnv == Feature || CmpPlatformEnv(PlatformEnv, Feature);
+
+ return PlatformEnv == Feature;
+}
+
/// Determine whether a translation unit built using the current
/// language options has the given feature.
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
@@ -88,12 +119,13 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("c17", LangOpts.C17)
.Case("freestanding", LangOpts.Freestanding)
.Case("gnuinlineasm", LangOpts.GNUAsm)
- .Case("objc", LangOpts.ObjC1)
+ .Case("objc", LangOpts.ObjC)
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
.Case("opencl", LangOpts.OpenCL)
.Case("tls", Target.isTLSSupported())
.Case("zvector", LangOpts.ZVector)
- .Default(Target.hasFeature(Feature));
+ .Default(Target.hasFeature(Feature) ||
+ isPlatformEnvironment(Target, Feature));
if (!HasFeature)
HasFeature = std::find(LangOpts.ModuleFeatures.begin(),
LangOpts.ModuleFeatures.end(),
@@ -577,10 +609,6 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
};
std::function<void(Visiting)> VisitModule = [&](Visiting V) {
- // Modules that aren't available cannot be made visible.
- if (!V.M->isAvailable())
- return;
-
// Nothing to do for a module that's already visible.
unsigned ID = V.M->getVisibilityID();
if (ImportLocs.size() <= ID)
@@ -594,8 +622,11 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
// Make any exported modules visible.
SmallVector<Module *, 16> Exports;
V.M->getExportedModules(Exports);
- for (Module *E : Exports)
- VisitModule({E, &V});
+ for (Module *E : Exports) {
+ // Don't recurse to unavailable submodules.
+ if (E->isAvailable())
+ VisitModule({E, &V});
+ }
for (auto &C : V.M->Conflicts) {
if (isVisible(C.Other)) {
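
A minimal standalone sketch of the dash-folding comparison isPlatformEnvironment performs above; the helper name and plain std::string types are illustrative only:

#include <cassert>
#include <string>

// Fold "os-environment" into "osenvironment" before comparing, mirroring the
// CmpPlatformEnv lambda above.
static bool matchesPlatformEnv(std::string PlatformEnv,
                               const std::string &Feature) {
  if (PlatformEnv == Feature)
    return true;
  auto Pos = PlatformEnv.find('-');
  if (Pos == std::string::npos)
    return false;
  PlatformEnv.erase(Pos, 1); // drop the first '-'
  return PlatformEnv == Feature;
}

int main() {
  // Both spellings of the iOS simulator environment satisfy the requirement.
  assert(matchesPlatformEnv("ios-simulator", "iossimulator"));
  assert(matchesPlatformEnv("iossimulator", "iossimulator"));
  assert(!matchesPlatformEnv("ios-simulator", "macos"));
  return 0;
}
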
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 67b7d91e6292..a5bfac86e610 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -108,8 +108,11 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_LINEAR_unknown);
case OMPC_map:
- return llvm::StringSwitch<OpenMPMapClauseKind>(Str)
-#define OPENMP_MAP_KIND(Name) .Case(#Name, OMPC_MAP_##Name)
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_MAP_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_MAP_##Name))
+#define OPENMP_MAP_MODIFIER_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_MAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_MAP_unknown);
case OMPC_dist_schedule:
@@ -125,6 +128,12 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
.Case(#Name, static_cast<unsigned>(OMPC_DEFAULTMAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_DEFAULTMAP_unknown);
+ case OMPC_atomic_default_mem_order:
+ return llvm::StringSwitch<OpenMPAtomicDefaultMemOrderClauseKind>(Str)
+#define OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(Name) \
+ .Case(#Name, OMPC_ATOMIC_DEFAULT_MEM_ORDER_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown);
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -168,6 +177,10 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -233,10 +246,14 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_map:
switch (Type) {
case OMPC_MAP_unknown:
+ case OMPC_MAP_MODIFIER_last:
return "unknown";
#define OPENMP_MAP_KIND(Name) \
case OMPC_MAP_##Name: \
return #Name;
+#define OPENMP_MAP_MODIFIER_KIND(Name) \
+ case OMPC_MAP_MODIFIER_##Name: \
+ return #Name;
#include "clang/Basic/OpenMPKinds.def"
default:
break;
@@ -266,6 +283,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'schedule' clause type");
+ case OMPC_atomic_default_mem_order:
+ switch (Type) {
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown:
+ return "unknown";
+#define OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(Name) \
+ case OMPC_ATOMIC_DEFAULT_MEM_ORDER_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+  }
+ llvm_unreachable("Invalid OpenMP 'atomic_default_mem_order' clause type");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -309,6 +336,10 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
break;
}
llvm_unreachable("Invalid OpenMP simple clause kind");
@@ -442,6 +473,16 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_requires:
+ switch (CKind) {
+#define OPENMP_REQUIRES_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_target_data:
switch (CKind) {
#define OPENMP_TARGET_DATA_CLAUSE(Name) \
@@ -961,6 +1002,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_requires:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index eb916ec76fdc..aa844f2cd26c 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -77,6 +77,61 @@ SourceLocation::printToString(const SourceManager &SM) const {
LLVM_DUMP_METHOD void SourceLocation::dump(const SourceManager &SM) const {
print(llvm::errs(), SM);
+ llvm::errs() << '\n';
+}
+
+LLVM_DUMP_METHOD void SourceRange::dump(const SourceManager &SM) const {
+ print(llvm::errs(), SM);
+ llvm::errs() << '\n';
+}
+
+static PresumedLoc PrintDifference(raw_ostream &OS, const SourceManager &SM,
+ SourceLocation Loc, PresumedLoc Previous) {
+ if (Loc.isFileID()) {
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return Previous;
+ }
+
+ if (Previous.isInvalid() ||
+ strcmp(PLoc.getFilename(), Previous.getFilename()) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
+ << PLoc.getColumn();
+ } else if (Previous.isInvalid() || PLoc.getLine() != Previous.getLine()) {
+ OS << "line" << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+ return PLoc;
+ }
+ auto PrintedLoc = PrintDifference(OS, SM, SM.getExpansionLoc(Loc), Previous);
+
+ OS << " <Spelling=";
+ PrintedLoc = PrintDifference(OS, SM, SM.getSpellingLoc(Loc), PrintedLoc);
+ OS << '>';
+ return PrintedLoc;
+}
+
+void SourceRange::print(raw_ostream &OS, const SourceManager &SM) const {
+
+ OS << '<';
+ auto PrintedLoc = PrintDifference(OS, SM, B, {});
+ if (B != E) {
+ OS << ", ";
+ PrintDifference(OS, SM, E, PrintedLoc);
+ }
+ OS << '>';
+}
+
+LLVM_DUMP_METHOD std::string
+SourceRange::printToString(const SourceManager &SM) const {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ print(OS, SM);
+ return OS.str();
}
//===----------------------------------------------------------------------===//
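
PrintDifference above prints only the parts of a location that differ from the previously printed one, so a range inside a single file stays compact. A self-contained sketch of that scheme over plain (file, line, column) tuples, with hypothetical names and types:

#include <iostream>
#include <string>
#include <tuple>

using Loc = std::tuple<std::string, unsigned, unsigned>; // file, line, column

// Print the full file:line:col only when the file changes; otherwise print
// just the line (and column) or just the column, as SourceRange::print does.
static Loc printDelta(std::ostream &OS, const Loc &L, const Loc *Prev) {
  const std::string &File = std::get<0>(L);
  unsigned Line = std::get<1>(L), Col = std::get<2>(L);
  if (!Prev || std::get<0>(*Prev) != File)
    OS << File << ':' << Line << ':' << Col;
  else if (std::get<1>(*Prev) != Line)
    OS << "line:" << Line << ':' << Col;
  else
    OS << "col:" << Col;
  return L;
}

int main() {
  Loc B{"foo.c", 3, 5}, E{"foo.c", 3, 12};
  std::cout << '<';
  Loc Printed = printDelta(std::cout, B, nullptr);
  std::cout << ", ";
  printDelta(std::cout, E, &Printed);
  std::cout << ">\n"; // prints: <foo.c:3:5, col:12>
  return 0;
}
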
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index efa6ad2493b2..ce8aa5d112b3 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -195,8 +195,7 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
}
unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
- auto IterBool =
- FilenameIDs.insert(std::make_pair(Name, FilenamesByID.size()));
+ auto IterBool = FilenameIDs.try_emplace(Name, FilenamesByID.size());
if (IterBool.second)
FilenamesByID.push_back(&*IterBool.first);
return IterBool.first->second;
@@ -1217,65 +1216,22 @@ static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
- unsigned Offs = 0;
+ unsigned I = 0;
while (true) {
// Skip over the contents of the line.
- const unsigned char *NextBuf = (const unsigned char *)Buf;
-
-#ifdef __SSE2__
- // Try to skip to the next newline using SSE instructions. This is very
- // performance sensitive for programs with lots of diagnostics and in -E
- // mode.
- __m128i CRs = _mm_set1_epi8('\r');
- __m128i LFs = _mm_set1_epi8('\n');
-
- // First fix up the alignment to 16 bytes.
- while (((uintptr_t)NextBuf & 0xF) != 0) {
- if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
- goto FoundSpecialChar;
- ++NextBuf;
- }
-
- // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
- while (NextBuf+16 <= End) {
- const __m128i Chunk = *(const __m128i*)NextBuf;
- __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
- _mm_cmpeq_epi8(Chunk, LFs));
- unsigned Mask = _mm_movemask_epi8(Cmp);
-
- // If we found a newline, adjust the pointer and jump to the handling code.
- if (Mask != 0) {
- NextBuf += llvm::countTrailingZeros(Mask);
- goto FoundSpecialChar;
- }
- NextBuf += 16;
- }
-#endif
-
- while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
- ++NextBuf;
-
-#ifdef __SSE2__
-FoundSpecialChar:
-#endif
- Offs += NextBuf-Buf;
- Buf = NextBuf;
-
- if (Buf[0] == '\n' || Buf[0] == '\r') {
- // If this is \n\r or \r\n, skip both characters.
- if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1]) {
- ++Offs;
- ++Buf;
- }
- ++Offs;
- ++Buf;
- LineOffsets.push_back(Offs);
+ while (Buf[I] != '\n' && Buf[I] != '\r' && Buf[I] != '\0')
+ ++I;
+
+ if (Buf[I] == '\n' || Buf[I] == '\r') {
+ // If this is \r\n, skip both characters.
+ if (Buf[I] == '\r' && Buf[I+1] == '\n')
+ ++I;
+ ++I;
+ LineOffsets.push_back(I);
} else {
- // Otherwise, this is a null. If end of file, exit.
- if (Buf == End) break;
- // Otherwise, skip the null.
- ++Offs;
- ++Buf;
+ // Otherwise, this is a NUL. If end of file, exit.
+ if (Buf+I == End) break;
+ ++I;
}
}
@@ -1965,9 +1921,7 @@ SourceManager::getDecomposedIncludedLoc(FileID FID) const {
// Uses IncludedLocMap to retrieve/cache the decomposed loc.
using DecompTy = std::pair<FileID, unsigned>;
- using MapTy = llvm::DenseMap<FileID, DecompTy>;
- std::pair<MapTy::iterator, bool>
- InsertOp = IncludedLocMap.insert(std::make_pair(FID, DecompTy()));
+ auto InsertOp = IncludedLocMap.try_emplace(FID);
DecompTy &DecompLoc = InsertOp.first->second;
if (!InsertOp.second)
return DecompLoc; // already in map.
@@ -2263,8 +2217,8 @@ SourceManagerForFile::SourceManagerForFile(StringRef FileName,
StringRef Content) {
// This is referenced by `FileMgr` and will be released by `FileMgr` when it
// is deleted.
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
InMemoryFileSystem->addFile(
FileName, 0,
llvm::MemoryBuffer::getMemBuffer(Content, FileName,
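
The rewritten ComputeLineNumbers loop above records the offset just past every newline, folding "\r\n" into one terminator and relying on the NUL sentinel at the end of the buffer. A small standalone sketch of that scan (the helper name and std::vector return are illustrative):

#include <cassert>
#include <vector>

static std::vector<unsigned> computeLineOffsets(const char *Buf, unsigned Size) {
  std::vector<unsigned> Offsets{0}; // line 1 starts at offset 0
  unsigned I = 0;
  while (true) {
    // Skip over the contents of the line.
    while (Buf[I] != '\n' && Buf[I] != '\r' && Buf[I] != '\0')
      ++I;
    if (Buf[I] == '\n' || Buf[I] == '\r') {
      if (Buf[I] == '\r' && Buf[I + 1] == '\n') // CRLF counts as one terminator
        ++I;
      Offsets.push_back(++I);
    } else {
      if (I == Size) // NUL sentinel at end of buffer: done
        break;
      ++I;           // embedded NUL inside the file: skip it
    }
  }
  return Offsets;
}

int main() {
  const char Text[] = "a\r\nbb\nc"; // the array includes a trailing NUL
  auto Offs = computeLineOffsets(Text, sizeof(Text) - 1);
  assert((Offs == std::vector<unsigned>{0, 3, 6}));
  return 0;
}
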
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 3400c8721f7a..269fad38b8d5 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -63,8 +63,9 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
MinGlobalAlign = 0;
// From the glibc documentation, on GNU systems, malloc guarantees 16-byte
// alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
- // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html
- if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment())
+ // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
+ // This alignment guarantee also applies to Windows and Android.
+ if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid())
NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
else
NewAlign = 0; // Infer from basic type alignment.
@@ -684,7 +685,9 @@ bool TargetInfo::validateInputConstraint(
// FIXME: Fail if % is used with the last operand.
break;
case 'i': // immediate integer.
+ break;
case 'n': // immediate integer with a known value.
+ Info.setRequiresImmediate();
break;
case 'I': // Various constant constraints with target-specific meanings.
case 'J':
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 1ef2fe3b8141..cf87bc484621 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -16,6 +16,7 @@
#include "Targets/AArch64.h"
#include "Targets/AMDGPU.h"
+#include "Targets/ARC.h"
#include "Targets/ARM.h"
#include "Targets/AVR.h"
#include "Targets/BPF.h"
@@ -25,7 +26,6 @@
#include "Targets/MSP430.h"
#include "Targets/Mips.h"
#include "Targets/NVPTX.h"
-#include "Targets/Nios2.h"
#include "Targets/OSTargets.h"
#include "Targets/PNaCl.h"
#include "Targets/PPC.h"
@@ -124,6 +124,9 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
default:
return nullptr;
+ case llvm::Triple::arc:
+ return new ARCTargetInfo(Triple, Opts);
+
case llvm::Triple::xcore:
return new XCoreTargetInfo(Triple, Opts);
@@ -243,9 +246,6 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::msp430:
return new MSP430TargetInfo(Triple, Opts);
- case llvm::Triple::nios2:
- return new LinuxTargetInfo<Nios2TargetInfo>(Triple, Opts);
-
case llvm::Triple::mips:
switch (os) {
case llvm::Triple::Linux:
@@ -495,6 +495,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new NaClTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::ELFIAMCU:
return new MCUX86_32TargetInfo(Triple, Opts);
+ case llvm::Triple::Hurd:
+ return new HurdTargetInfo<X86_32TargetInfo>(Triple, Opts);
default:
return new X86_32TargetInfo(Triple, Opts);
}
@@ -566,17 +568,19 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::wasm32:
if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
Triple.getVendor() != llvm::Triple::UnknownVendor ||
- Triple.getOS() != llvm::Triple::UnknownOS ||
- Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
- !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
+ !Triple.isOSBinFormatWasm())
+ return nullptr;
+ if (Triple.getOS() != llvm::Triple::UnknownOS &&
+ Triple.getOS() != llvm::Triple::WASI)
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
case llvm::Triple::wasm64:
if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
Triple.getVendor() != llvm::Triple::UnknownVendor ||
- Triple.getOS() != llvm::Triple::UnknownOS ||
- Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
- !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
+ !Triple.isOSBinFormatWasm())
+ return nullptr;
+ if (Triple.getOS() != llvm::Triple::UnknownOS &&
+ Triple.getOS() != llvm::Triple::WASI)
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
@@ -640,7 +644,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Opts->Features.push_back((F.getValue() ? "+" : "-") + F.getKey().str());
// Sort here, so we handle the features in a predictable order. (This matters
// when we're dealing with features that overlap.)
- llvm::sort(Opts->Features.begin(), Opts->Features.end());
+ llvm::sort(Opts->Features);
if (!Target->handleTargetFeatures(Opts->Features, Diags))
return nullptr;
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 3444591ac593..62919a02dcb9 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -37,11 +37,11 @@ const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
- if (getTriple().getOS() == llvm::Triple::OpenBSD) {
+ if (getTriple().isOSOpenBSD()) {
Int64Type = SignedLongLong;
IntMaxType = SignedLongLong;
} else {
- if (!getTriple().isOSDarwin() && getTriple().getOS() != llvm::Triple::NetBSD)
+ if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
WCharType = UnsignedInt;
Int64Type = SignedLong;
@@ -122,10 +122,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
Builder.defineMacro("__aarch64__");
- // For bare-metal none-eabi.
+ // For bare-metal.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- (getTriple().getEnvironment() == llvm::Triple::EABI ||
- getTriple().getEnvironment() == llvm::Triple::EABIHF))
+ getTriple().isOSBinFormatELF())
Builder.defineMacro("__ELF__");
// Target properties.
@@ -195,6 +194,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasDotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if ((FPU & NeonMode) && HasFP16FML)
+ Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
+
switch (ArchKind) {
default:
break;
@@ -232,6 +234,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
Unaligned = 1;
HasFullFP16 = 0;
HasDotProd = 0;
+ HasFP16FML = 0;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
@@ -253,6 +256,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasFullFP16 = 1;
if (Feature == "+dotprod")
HasDotProd = 1;
+ if (Feature == "+fp16fml")
+ HasFP16FML = 1;
}
setDataLayout();
@@ -268,6 +273,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveMost:
case CC_PreserveAll:
case CC_OpenCLKernel:
+ case CC_AArch64VectorCall:
case CC_Win64:
return CCCR_OK;
default:
@@ -508,6 +514,7 @@ WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_Swift:
case CC_Win64:
return CCCR_OK;
default:
diff --git a/lib/Basic/Targets/AArch64.h b/lib/Basic/Targets/AArch64.h
index a9df895e4dad..d7f767abd4d1 100644
--- a/lib/Basic/Targets/AArch64.h
+++ b/lib/Basic/Targets/AArch64.h
@@ -34,6 +34,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
unsigned Unaligned;
unsigned HasFullFP16;
unsigned HasDotProd;
+ unsigned HasFP16FML;
llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
diff --git a/lib/Basic/Targets/AMDGPU.cpp b/lib/Basic/Targets/AMDGPU.cpp
index b6b9aa2f1244..7313a692f46b 100644
--- a/lib/Basic/Targets/AMDGPU.cpp
+++ b/lib/Basic/Targets/AMDGPU.cpp
@@ -13,10 +13,10 @@
#include "AMDGPU.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -127,15 +127,19 @@ bool AMDGPUTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeatureVec) const {
+ using namespace llvm::AMDGPU;
+
// XXX - What does the member GPU mean if device name string passed here?
if (isAMDGCN(getTriple())) {
if (CPU.empty())
CPU = "gfx600";
- switch (parseAMDGCNName(CPU).Kind) {
+ switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
case GK_GFX906:
Features["dl-insts"] = true;
+ Features["dot-insts"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX909:
case GK_GFX904:
case GK_GFX902:
case GK_GFX900:
@@ -145,15 +149,18 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX803:
case GK_GFX802:
case GK_GFX801:
+ Features["vi-insts"] = true;
Features["16-bit-insts"] = true;
Features["dpp"] = true;
Features["s-memrealtime"] = true;
- break;
+ LLVM_FALLTHROUGH;
case GK_GFX704:
case GK_GFX703:
case GK_GFX702:
case GK_GFX701:
case GK_GFX700:
+ Features["ci-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX601:
case GK_GFX600:
break;
@@ -166,7 +173,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
if (CPU.empty())
CPU = "r600";
- switch (parseR600Name(CPU).Kind) {
+ switch (llvm::AMDGPU::parseArchR600(CPU)) {
case GK_CAYMAN:
case GK_CYPRESS:
case GK_RV770:
@@ -198,7 +205,7 @@ void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
TargetOptions &TargetOpts) const {
bool hasFP32Denormals = false;
bool hasFP64Denormals = false;
- GPUInfo CGOptsGPU = parseGPUName(TargetOpts.CPU);
+
for (auto &I : TargetOpts.FeaturesAsWritten) {
if (I == "+fp32-denormals" || I == "-fp32-denormals")
hasFP32Denormals = true;
@@ -207,53 +214,20 @@ void AMDGPUTargetInfo::adjustTargetOptions(const CodeGenOptions &CGOpts,
}
if (!hasFP32Denormals)
TargetOpts.Features.push_back(
- (Twine(CGOptsGPU.HasFastFMAF && !CGOpts.FlushDenorm
- ? '+'
- : '-') +
- Twine("fp32-denormals"))
+ (Twine(hasFastFMAF() && hasFullRateDenormalsF32() && !CGOpts.FlushDenorm
+ ? '+' : '-') + Twine("fp32-denormals"))
.str());
// Always do not flush fp64 or fp16 denorms.
- if (!hasFP64Denormals && CGOptsGPU.HasFP64)
+ if (!hasFP64Denormals && hasFP64())
TargetOpts.Features.push_back("+fp64-fp16-denormals");
}
-constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::InvalidGPU;
-constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::R600GPUs[];
-constexpr AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::AMDGCNGPUs[];
-
-AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseR600Name(StringRef Name) {
- const auto *Result = llvm::find_if(
- R600GPUs, [Name](const GPUInfo &GPU) { return GPU.Name == Name; });
-
- if (Result == std::end(R600GPUs))
- return InvalidGPU;
- return *Result;
-}
-
-AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseAMDGCNName(StringRef Name) {
- const auto *Result = llvm::find_if(
- AMDGCNGPUs, [Name](const GPUInfo &GPU) { return GPU.Name == Name; });
-
- if (Result == std::end(AMDGCNGPUs))
- return InvalidGPU;
- return *Result;
-}
-
-AMDGPUTargetInfo::GPUInfo AMDGPUTargetInfo::parseGPUName(StringRef Name) const {
- if (isAMDGCN(getTriple()))
- return parseAMDGCNName(Name);
- else
- return parseR600Name(Name);
-}
-
void AMDGPUTargetInfo::fillValidCPUList(
SmallVectorImpl<StringRef> &Values) const {
if (isAMDGCN(getTriple()))
- llvm::for_each(AMDGCNGPUs, [&Values](const GPUInfo &GPU) {
- Values.emplace_back(GPU.Name);});
+ llvm::AMDGPU::fillValidArchListAMDGCN(Values);
else
- llvm::for_each(R600GPUs, [&Values](const GPUInfo &GPU) {
- Values.emplace_back(GPU.Name);});
+ llvm::AMDGPU::fillValidArchListR600(Values);
}
void AMDGPUTargetInfo::setAddressSpaceMap(bool DefaultIsPrivate) {
@@ -263,7 +237,12 @@ void AMDGPUTargetInfo::setAddressSpaceMap(bool DefaultIsPrivate) {
AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple),
- GPU(isAMDGCN(Triple) ? AMDGCNGPUs[0] : parseR600Name(Opts.CPU)) {
+ GPUKind(isAMDGCN(Triple) ?
+ llvm::AMDGPU::parseArchAMDGCN(Opts.CPU) :
+ llvm::AMDGPU::parseArchR600(Opts.CPU)),
+ GPUFeatures(isAMDGCN(Triple) ?
+ llvm::AMDGPU::getArchAttrAMDGCN(GPUKind) :
+ llvm::AMDGPU::getArchAttrR600(GPUKind)) {
resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
: DataLayoutStringR600);
assert(DataLayout->getAllocaAddrSpace() == Private);
@@ -308,19 +287,22 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
else
Builder.defineMacro("__R600__");
- if (GPU.Kind != GK_NONE)
- Builder.defineMacro(Twine("__") + Twine(GPU.CanonicalName) + Twine("__"));
+ if (GPUKind != llvm::AMDGPU::GK_NONE) {
+ StringRef CanonName = isAMDGCN(getTriple()) ?
+ getArchNameAMDGCN(GPUKind) : getArchNameR600(GPUKind);
+ Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
+ }
// TODO: __HAS_FMAF__, __HAS_LDEXPF__, __HAS_FP64__ are deprecated and will be
// removed in the near future.
- if (GPU.HasFMAF)
+ if (hasFMAF())
Builder.defineMacro("__HAS_FMAF__");
- if (GPU.HasFastFMAF)
+ if (hasFastFMAF())
Builder.defineMacro("FP_FAST_FMAF");
- if (GPU.HasLDEXPF)
+ if (hasLDEXPF())
Builder.defineMacro("__HAS_LDEXPF__");
- if (GPU.HasFP64)
+ if (hasFP64())
Builder.defineMacro("__HAS_FP64__");
- if (GPU.HasFastFMA)
+ if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
}
diff --git a/lib/Basic/Targets/AMDGPU.h b/lib/Basic/Targets/AMDGPU.h
index b0221031addf..926772809aa7 100644
--- a/lib/Basic/Targets/AMDGPU.h
+++ b/lib/Basic/Targets/AMDGPU.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/TargetParser.h"
namespace clang {
namespace targets {
@@ -38,147 +39,47 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
static const LangASMap AMDGPUDefIsGenMap;
static const LangASMap AMDGPUDefIsPrivMap;
- /// GPU kinds supported by the AMDGPU target.
- enum GPUKind : uint32_t {
- // Not specified processor.
- GK_NONE = 0,
-
- // R600-based processors.
- GK_R600,
- GK_R630,
- GK_RS880,
- GK_RV670,
- GK_RV710,
- GK_RV730,
- GK_RV770,
- GK_CEDAR,
- GK_CYPRESS,
- GK_JUNIPER,
- GK_REDWOOD,
- GK_SUMO,
- GK_BARTS,
- GK_CAICOS,
- GK_CAYMAN,
- GK_TURKS,
-
- GK_R600_FIRST = GK_R600,
- GK_R600_LAST = GK_TURKS,
-
- // AMDGCN-based processors.
- GK_GFX600,
- GK_GFX601,
- GK_GFX700,
- GK_GFX701,
- GK_GFX702,
- GK_GFX703,
- GK_GFX704,
- GK_GFX801,
- GK_GFX802,
- GK_GFX803,
- GK_GFX810,
- GK_GFX900,
- GK_GFX902,
- GK_GFX904,
- GK_GFX906,
-
- GK_AMDGCN_FIRST = GK_GFX600,
- GK_AMDGCN_LAST = GK_GFX906,
- };
+ llvm::AMDGPU::GPUKind GPUKind;
+ unsigned GPUFeatures;
- struct GPUInfo {
- llvm::StringLiteral Name;
- llvm::StringLiteral CanonicalName;
- AMDGPUTargetInfo::GPUKind Kind;
- bool HasFMAF;
- bool HasFastFMAF;
- bool HasLDEXPF;
- bool HasFP64;
- bool HasFastFMA;
- };
- static constexpr GPUInfo InvalidGPU =
- {{""}, {""}, GK_NONE, false, false, false, false, false};
- static constexpr GPUInfo R600GPUs[26] = {
- // Name Canonical Kind Has Has Has Has Has
- // Name FMAF Fast LDEXPF FP64 Fast
- // FMAF FMA
- {{"r600"}, {"r600"}, GK_R600, false, false, false, false, false},
- {{"rv630"}, {"r600"}, GK_R600, false, false, false, false, false},
- {{"rv635"}, {"r600"}, GK_R600, false, false, false, false, false},
- {{"r630"}, {"r630"}, GK_R630, false, false, false, false, false},
- {{"rs780"}, {"rs880"}, GK_RS880, false, false, false, false, false},
- {{"rs880"}, {"rs880"}, GK_RS880, false, false, false, false, false},
- {{"rv610"}, {"rs880"}, GK_RS880, false, false, false, false, false},
- {{"rv620"}, {"rs880"}, GK_RS880, false, false, false, false, false},
- {{"rv670"}, {"rv670"}, GK_RV670, false, false, false, false, false},
- {{"rv710"}, {"rv710"}, GK_RV710, false, false, false, false, false},
- {{"rv730"}, {"rv730"}, GK_RV730, false, false, false, false, false},
- {{"rv740"}, {"rv770"}, GK_RV770, false, false, false, false, false},
- {{"rv770"}, {"rv770"}, GK_RV770, false, false, false, false, false},
- {{"cedar"}, {"cedar"}, GK_CEDAR, false, false, false, false, false},
- {{"palm"}, {"cedar"}, GK_CEDAR, false, false, false, false, false},
- {{"cypress"}, {"cypress"}, GK_CYPRESS, true, false, false, false, false},
- {{"hemlock"}, {"cypress"}, GK_CYPRESS, true, false, false, false, false},
- {{"juniper"}, {"juniper"}, GK_JUNIPER, false, false, false, false, false},
- {{"redwood"}, {"redwood"}, GK_REDWOOD, false, false, false, false, false},
- {{"sumo"}, {"sumo"}, GK_SUMO, false, false, false, false, false},
- {{"sumo2"}, {"sumo"}, GK_SUMO, false, false, false, false, false},
- {{"barts"}, {"barts"}, GK_BARTS, false, false, false, false, false},
- {{"caicos"}, {"caicos"}, GK_BARTS, false, false, false, false, false},
- {{"aruba"}, {"cayman"}, GK_CAYMAN, true, false, false, false, false},
- {{"cayman"}, {"cayman"}, GK_CAYMAN, true, false, false, false, false},
- {{"turks"}, {"turks"}, GK_TURKS, false, false, false, false, false},
- };
- static constexpr GPUInfo AMDGCNGPUs[32] = {
- // Name Canonical Kind Has Has Has Has Has
- // Name FMAF Fast LDEXPF FP64 Fast
- // FMAF FMA
- {{"gfx600"}, {"gfx600"}, GK_GFX600, true, true, true, true, true},
- {{"tahiti"}, {"gfx600"}, GK_GFX600, true, true, true, true, true},
- {{"gfx601"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
- {{"hainan"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
- {{"oland"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
- {{"pitcairn"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
- {{"verde"}, {"gfx601"}, GK_GFX601, true, false, true, true, true},
- {{"gfx700"}, {"gfx700"}, GK_GFX700, true, false, true, true, true},
- {{"kaveri"}, {"gfx700"}, GK_GFX700, true, false, true, true, true},
- {{"gfx701"}, {"gfx701"}, GK_GFX701, true, true, true, true, true},
- {{"hawaii"}, {"gfx701"}, GK_GFX701, true, true, true, true, true},
- {{"gfx702"}, {"gfx702"}, GK_GFX702, true, true, true, true, true},
- {{"gfx703"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
- {{"kabini"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
- {{"mullins"}, {"gfx703"}, GK_GFX703, true, false, true, true, true},
- {{"gfx704"}, {"gfx704"}, GK_GFX704, true, false, true, true, true},
- {{"bonaire"}, {"gfx704"}, GK_GFX704, true, false, true, true, true},
- {{"gfx801"}, {"gfx801"}, GK_GFX801, true, true, true, true, true},
- {{"carrizo"}, {"gfx801"}, GK_GFX801, true, true, true, true, true},
- {{"gfx802"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
- {{"iceland"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
- {{"tonga"}, {"gfx802"}, GK_GFX802, true, false, true, true, true},
- {{"gfx803"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
- {{"fiji"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
- {{"polaris10"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
- {{"polaris11"}, {"gfx803"}, GK_GFX803, true, false, true, true, true},
- {{"gfx810"}, {"gfx810"}, GK_GFX810, true, false, true, true, true},
- {{"stoney"}, {"gfx810"}, GK_GFX810, true, false, true, true, true},
- {{"gfx900"}, {"gfx900"}, GK_GFX900, true, true, true, true, true},
- {{"gfx902"}, {"gfx902"}, GK_GFX900, true, true, true, true, true},
- {{"gfx904"}, {"gfx904"}, GK_GFX904, true, true, true, true, true},
- {{"gfx906"}, {"gfx906"}, GK_GFX906, true, true, true, true, true},
- };
+ bool hasFP64() const {
+ return getTriple().getArch() == llvm::Triple::amdgcn ||
+ !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
+ }
- static GPUInfo parseR600Name(StringRef Name);
+ /// Has fast fma f32
+ bool hasFastFMAF() const {
+ return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
+ }
- static GPUInfo parseAMDGCNName(StringRef Name);
+ /// Has fast fma f64
+ bool hasFastFMA() const {
+ return getTriple().getArch() == llvm::Triple::amdgcn;
+ }
- GPUInfo parseGPUName(StringRef Name) const;
+ bool hasFMAF() const {
+ return getTriple().getArch() == llvm::Triple::amdgcn ||
+ !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
+ }
- GPUInfo GPU;
+ bool hasFullRateDenormalsF32() const {
+ return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
+ }
+
+ bool hasLDEXPF() const {
+ return getTriple().getArch() == llvm::Triple::amdgcn ||
+ !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
+ }
static bool isAMDGCN(const llvm::Triple &TT) {
return TT.getArch() == llvm::Triple::amdgcn;
}
+ static bool isR600(const llvm::Triple &TT) {
+ return TT.getArch() == llvm::Triple::r600;
+ }
+
public:
AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);
@@ -187,10 +88,12 @@ public:
void adjust(LangOptions &Opts) override;
uint64_t getPointerWidthV(unsigned AddrSpace) const override {
- if (GPU.Kind <= GK_R600_LAST)
+ if (isR600(getTriple()))
return 32;
+
if (AddrSpace == Private || AddrSpace == Local)
return 32;
+
return 64;
}
@@ -321,20 +224,22 @@ public:
bool isValidCPUName(StringRef Name) const override {
if (getTriple().getArch() == llvm::Triple::amdgcn)
- return GK_NONE != parseAMDGCNName(Name).Kind;
- else
- return GK_NONE != parseR600Name(Name).Kind;
+ return llvm::AMDGPU::parseArchAMDGCN(Name) != llvm::AMDGPU::GK_NONE;
+ return llvm::AMDGPU::parseArchR600(Name) != llvm::AMDGPU::GK_NONE;
}
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
- if (getTriple().getArch() == llvm::Triple::amdgcn)
- GPU = parseAMDGCNName(Name);
- else
- GPU = parseR600Name(Name);
+ if (getTriple().getArch() == llvm::Triple::amdgcn) {
+ GPUKind = llvm::AMDGPU::parseArchAMDGCN(Name);
+ GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(GPUKind);
+ } else {
+ GPUKind = llvm::AMDGPU::parseArchR600(Name);
+ GPUFeatures = llvm::AMDGPU::getArchAttrR600(GPUKind);
+ }
- return GK_NONE != GPU.Kind;
+ return GPUKind != llvm::AMDGPU::GK_NONE;
}
void setSupportedOpenCLOpts() override {
@@ -342,16 +247,20 @@ public:
Opts.support("cl_clang_storage_class_specifiers");
Opts.support("cl_khr_icd");
- if (GPU.HasFP64)
+ bool IsAMDGCN = isAMDGCN(getTriple());
+
+ if (hasFP64())
Opts.support("cl_khr_fp64");
- if (GPU.Kind >= GK_CEDAR) {
+
+ if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
Opts.support("cl_khr_byte_addressable_store");
Opts.support("cl_khr_global_int32_base_atomics");
Opts.support("cl_khr_global_int32_extended_atomics");
Opts.support("cl_khr_local_int32_base_atomics");
Opts.support("cl_khr_local_int32_extended_atomics");
}
- if (GPU.Kind >= GK_AMDGCN_FIRST) {
+
+ if (IsAMDGCN) {
Opts.support("cl_khr_fp16");
Opts.support("cl_khr_int64_base_atomics");
Opts.support("cl_khr_int64_extended_atomics");
@@ -378,6 +287,27 @@ public:
}
}
+ LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
+ switch (AS) {
+ case 0:
+ return LangAS::opencl_generic;
+ case 1:
+ return LangAS::opencl_global;
+ case 3:
+ return LangAS::opencl_local;
+ case 4:
+ return LangAS::opencl_constant;
+ case 5:
+ return LangAS::opencl_private;
+ default:
+ return getLangASFromTargetAS(AS);
+ }
+ }
+
+ LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
+ return LangAS::Default;
+ }
+
llvm::Optional<LangAS> getConstantAddressSpace() const override {
return getLangASFromTargetAS(Constant);
}
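
The AMDGPU target now derives its processor kind and feature bits from llvm::AMDGPU's TargetParser instead of the removed local GPUInfo tables. A sketch of that lookup, assuming the parseArchAMDGCN/getArchAttrAMDGCN entry points and the FEATURE_FP64 bit referenced above, and linking against a matching LLVMSupport:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"

// Resolve a processor name to a GPUKind, then test one feature bit, similar to
// what AMDGPUTargetInfo::setCPU and the has*() helpers above do.
static bool cpuHasFP64(llvm::StringRef CPU) {
  llvm::AMDGPU::GPUKind Kind = llvm::AMDGPU::parseArchAMDGCN(CPU);
  if (Kind == llvm::AMDGPU::GK_NONE)
    return false; // unknown processor name
  unsigned Features = llvm::AMDGPU::getArchAttrAMDGCN(Kind);
  return (Features & llvm::AMDGPU::FEATURE_FP64) != 0;
}

int main() {
  llvm::outs() << "gfx900 fp64: " << cpuHasFP64("gfx900") << '\n';
  llvm::outs() << "bogus fp64: " << cpuHasFP64("not-a-gpu") << '\n';
  return 0;
}
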
diff --git a/lib/Basic/Targets/ARC.cpp b/lib/Basic/Targets/ARC.cpp
new file mode 100644
index 000000000000..2159ab8e2020
--- /dev/null
+++ b/lib/Basic/Targets/ARC.cpp
@@ -0,0 +1,25 @@
+//===--- ARC.cpp - Implement ARC target feature support -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ARC TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARC.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void ARCTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__arc__");
+}
diff --git a/lib/Basic/Targets/ARC.h b/lib/Basic/Targets/ARC.h
new file mode 100644
index 000000000000..ee20568f3d5b
--- /dev/null
+++ b/lib/Basic/Targets/ARC.h
@@ -0,0 +1,74 @@
+//===--- ARC.h - Declare ARC target feature support -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares ARC TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY ARCTargetInfo : public TargetInfo {
+public:
+ ARCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongLongAlign = 32;
+ SuitableAlign = 32;
+ DoubleAlign = LongDoubleAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ UseZeroLengthBitfieldAlignment = true;
+ resetDataLayout("e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-"
+ "i32:32:32-f32:32:32-i64:32-f64:32-a:0:32-n32");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "gp", "sp", "fp", "ilink1", "r30", "blink"};
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ return false;
+ }
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_ARC_H
diff --git a/lib/Basic/Targets/ARM.cpp b/lib/Basic/Targets/ARM.cpp
index 19fcc5abea97..16644ace108b 100644
--- a/lib/Basic/Targets/ARM.cpp
+++ b/lib/Basic/Targets/ARM.cpp
@@ -28,8 +28,8 @@ void ARMTargetInfo::setABIAAPCS() {
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
const llvm::Triple &T = getTriple();
- bool IsNetBSD = T.getOS() == llvm::Triple::NetBSD;
- bool IsOpenBSD = T.getOS() == llvm::Triple::OpenBSD;
+ bool IsNetBSD = T.isOSNetBSD();
+ bool IsOpenBSD = T.isOSOpenBSD();
if (!T.isOSWindows() && !IsNetBSD && !IsOpenBSD)
WCharType = UnsignedInt;
@@ -189,6 +189,8 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_3A";
case llvm::ARM::ArchKind::ARMV8_4A:
return "8_4A";
+ case llvm::ARM::ArchKind::ARMV8_5A:
+ return "8_5A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -215,8 +217,8 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
- bool IsOpenBSD = Triple.getOS() == llvm::Triple::OpenBSD;
- bool IsNetBSD = Triple.getOS() == llvm::Triple::NetBSD;
+ bool IsOpenBSD = Triple.isOSOpenBSD();
+ bool IsNetBSD = Triple.isOSNetBSD();
// FIXME: the isOSBinFormatMachO is a workaround for identifying a Darwin-like
// environment where size_t is `unsigned long` rather than `unsigned int`
@@ -280,9 +282,9 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
setABI("apcs-gnu");
break;
default:
- if (Triple.getOS() == llvm::Triple::NetBSD)
+ if (IsNetBSD)
setABI("apcs-gnu");
- else if (Triple.getOS() == llvm::Triple::OpenBSD)
+ else if (IsOpenBSD)
setABI("aapcs-linux");
else
setABI("aapcs");
@@ -661,7 +663,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// ACLE 6.4.9 32-bit SIMD instructions
- if (ArchVersion >= 6 && (CPUProfile != "M" || CPUAttr == "7EM"))
+ if ((CPUProfile != "M" && ArchVersion >= 6) || (CPUProfile == "M" && DSP))
Builder.defineMacro("__ARM_FEATURE_SIMD32", "1");
// ACLE 6.4.10 Hardware Integer Divide
@@ -994,6 +996,7 @@ WindowsARMTargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_Swift:
return CCCR_OK;
default:
return CCCR_Warning;
diff --git a/lib/Basic/Targets/Hexagon.cpp b/lib/Basic/Targets/Hexagon.cpp
index 0ef1f6db281e..94e1388e381e 100644
--- a/lib/Basic/Targets/Hexagon.cpp
+++ b/lib/Basic/Targets/Hexagon.cpp
@@ -25,14 +25,7 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
- if (CPU == "hexagonv4") {
- Builder.defineMacro("__HEXAGON_V4__");
- Builder.defineMacro("__HEXAGON_ARCH__", "4");
- if (Opts.HexagonQdsp6Compat) {
- Builder.defineMacro("__QDSP6_V4__");
- Builder.defineMacro("__QDSP6_ARCH__", "4");
- }
- } else if (CPU == "hexagonv5") {
+ if (CPU == "hexagonv5") {
Builder.defineMacro("__HEXAGON_V5__");
Builder.defineMacro("__HEXAGON_ARCH__", "5");
if (Opts.HexagonQdsp6Compat) {
@@ -55,6 +48,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv65") {
Builder.defineMacro("__HEXAGON_V65__");
Builder.defineMacro("__HEXAGON_ARCH__", "65");
+ } else if (CPU == "hexagonv66") {
+ Builder.defineMacro("__HEXAGON_V66__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "66");
}
if (hasFeature("hvx-length64b")) {
@@ -150,9 +146,9 @@ struct CPUSuffix {
};
static constexpr CPUSuffix Suffixes[] = {
- {{"hexagonv4"}, {"4"}}, {{"hexagonv5"}, {"5"}},
- {{"hexagonv55"}, {"55"}}, {{"hexagonv60"}, {"60"}},
- {{"hexagonv62"}, {"62"}}, {{"hexagonv65"}, {"65"}},
+ {{"hexagonv5"}, {"5"}}, {{"hexagonv55"}, {"55"}},
+ {{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
+ {{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
diff --git a/lib/Basic/Targets/Mips.cpp b/lib/Basic/Targets/Mips.cpp
index cbd5a01c3da8..d43edeae608f 100644
--- a/lib/Basic/Targets/Mips.cpp
+++ b/lib/Basic/Targets/Mips.cpp
@@ -59,6 +59,16 @@ void MipsTargetInfo::fillValidCPUList(
Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
}
+unsigned MipsTargetInfo::getISARev() const {
+ return llvm::StringSwitch<unsigned>(getCPU())
+ .Cases("mips32", "mips64", 1)
+ .Cases("mips32r2", "mips64r2", 2)
+ .Cases("mips32r3", "mips64r3", 3)
+ .Cases("mips32r5", "mips64r5", 5)
+ .Cases("mips32r6", "mips64r6", 6)
+ .Default(0);
+}
+
void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
if (BigEndian) {
@@ -84,13 +94,8 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_MIPS_ISA", "_MIPS_ISA_MIPS64");
}
- const std::string ISARev = llvm::StringSwitch<std::string>(getCPU())
- .Cases("mips32", "mips64", "1")
- .Cases("mips32r2", "mips64r2", "2")
- .Cases("mips32r3", "mips64r3", "3")
- .Cases("mips32r5", "mips64r5", "5")
- .Cases("mips32r6", "mips64r6", "6")
- .Default("");
+ const std::string ISARev = std::to_string(getISARev());
+
if (!ISARev.empty())
Builder.defineMacro("__mips_isa_rev", ISARev);
@@ -129,9 +134,22 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
if (IsSingleFloat)
Builder.defineMacro("__mips_single_float", Twine(1));
- Builder.defineMacro("__mips_fpr", HasFP64 ? Twine(64) : Twine(32));
- Builder.defineMacro("_MIPS_FPSET",
- Twine(32 / (HasFP64 || IsSingleFloat ? 1 : 2)));
+ switch (FPMode) {
+ case FPXX:
+ Builder.defineMacro("__mips_fpr", Twine(0));
+ break;
+ case FP32:
+ Builder.defineMacro("__mips_fpr", Twine(32));
+ break;
+ case FP64:
+ Builder.defineMacro("__mips_fpr", Twine(64));
+ break;
+  }
+
+ if (FPMode == FP64 || IsSingleFloat)
+ Builder.defineMacro("_MIPS_FPSET", Twine(32));
+ else
+ Builder.defineMacro("_MIPS_FPSET", Twine(16));
if (IsMips16)
Builder.defineMacro("__mips16", Twine(1));
@@ -189,7 +207,7 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
bool MipsTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("mips", true)
- .Case("fp64", HasFP64)
+ .Case("fp64", FPMode == FP64)
.Default(false);
}
@@ -235,5 +253,30 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
return false;
}
+  // -mfpxx is valid only for the o32 ABI
+ if (FPMode == FPXX && (ABI == "n32" || ABI == "n64")) {
+ Diags.Report(diag::err_unsupported_abi_for_opt) << "-mfpxx" << "o32";
+ return false;
+ }
+
+ // -mfp32 and n32/n64 ABIs are incompatible
+ if (FPMode != FP64 && FPMode != FPXX && !IsSingleFloat &&
+ (ABI == "n32" || ABI == "n64")) {
+    Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfp32" << CPU;
+ return false;
+ }
+ // Mips revision 6 and -mfp32 are incompatible
+ if (FPMode != FP64 && FPMode != FPXX && (CPU == "mips32r6" ||
+ CPU == "mips64r6")) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfp32" << CPU;
+ return false;
+ }
+  // -mfp64 is permitted with the o32 ABI only on MIPS32 revision 2 or higher
+ if (FPMode == FP64 && (CPU == "mips1" || CPU == "mips2" ||
+ getISARev() < 2) && ABI == "o32") {
+ Diags.Report(diag::err_mips_fp64_req) << "-mfp64";
+ return false;
+ }
+
return true;
}
diff --git a/lib/Basic/Targets/Mips.h b/lib/Basic/Targets/Mips.h
index 11e9ac914430..d49f49888b0c 100644
--- a/lib/Basic/Targets/Mips.h
+++ b/lib/Basic/Targets/Mips.h
@@ -57,7 +57,7 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
bool UseIndirectJumpHazard;
protected:
- bool HasFP64;
+ enum FPModeEnum { FPXX, FP32, FP64 } FPMode;
std::string ABI;
public:
@@ -66,15 +66,20 @@ public:
IsNan2008(false), IsAbs2008(false), IsSingleFloat(false),
IsNoABICalls(false), CanUseBSDABICalls(false), FloatABI(HardFloat),
DspRev(NoDSP), HasMSA(false), DisableMadd4(false),
- UseIndirectJumpHazard(false), HasFP64(false) {
+ UseIndirectJumpHazard(false), FPMode(FPXX) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
- setABI(getTriple().isMIPS32() ? "o32" : "n64");
+ if (Triple.isMIPS32())
+ setABI("o32");
+ else if (Triple.getEnvironment() == llvm::Triple::GNUABIN32)
+ setABI("n32");
+ else
+ setABI("n64");
CPU = ABI == "o32" ? "mips32r2" : "mips64r2";
- CanUseBSDABICalls = Triple.getOS() == llvm::Triple::FreeBSD ||
- Triple.getOS() == llvm::Triple::OpenBSD;
+ CanUseBSDABICalls = Triple.isOSFreeBSD() ||
+ Triple.isOSOpenBSD();
}
bool isIEEE754_2008Default() const {
@@ -127,7 +132,7 @@ public:
void setN32N64ABITypes() {
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
- if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ if (getTriple().isOSFreeBSD()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
@@ -137,7 +142,7 @@ public:
void setN64ABITypes() {
setN32N64ABITypes();
- if (getTriple().getOS() == llvm::Triple::OpenBSD) {
+ if (getTriple().isOSOpenBSD()) {
Int64Type = SignedLongLong;
} else {
Int64Type = SignedLong;
@@ -181,6 +186,8 @@ public:
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
+ unsigned getISARev() const;
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
@@ -305,7 +312,7 @@ public:
IsSingleFloat = false;
FloatABI = HardFloat;
DspRev = NoDSP;
- HasFP64 = isFP64Default();
+ FPMode = isFP64Default() ? FP64 : FPXX;
for (const auto &Feature : Features) {
if (Feature == "+single-float")
@@ -325,9 +332,11 @@ public:
else if (Feature == "+nomadd4")
DisableMadd4 = true;
else if (Feature == "+fp64")
- HasFP64 = true;
+ FPMode = FP64;
else if (Feature == "-fp64")
- HasFP64 = false;
+ FPMode = FP32;
+ else if (Feature == "+fpxx")
+ FPMode = FPXX;
else if (Feature == "+nan2008")
IsNan2008 = true;
else if (Feature == "-nan2008")
diff --git a/lib/Basic/Targets/NVPTX.cpp b/lib/Basic/Targets/NVPTX.cpp
index fd4ee1606061..ca41c4d14ca3 100644
--- a/lib/Basic/Targets/NVPTX.cpp
+++ b/lib/Basic/Targets/NVPTX.cpp
@@ -188,6 +188,9 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX810:
case CudaArch::GFX900:
case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
case CudaArch::LAST:
break;
case CudaArch::UNKNOWN:
@@ -221,6 +224,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "700";
case CudaArch::SM_72:
return "720";
+ case CudaArch::SM_75:
+ return "750";
}
llvm_unreachable("unhandled CudaArch");
}();
diff --git a/lib/Basic/Targets/Nios2.cpp b/lib/Basic/Targets/Nios2.cpp
deleted file mode 100644
index 48f662dd98c1..000000000000
--- a/lib/Basic/Targets/Nios2.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//===--- Nios2.cpp - Implement Nios2 target feature support ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements Nios2 TargetInfo objects.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Nios2.h"
-#include "Targets.h"
-#include "clang/Basic/MacroBuilder.h"
-#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/ADT/StringSwitch.h"
-
-using namespace clang;
-using namespace clang::targets;
-
-const Builtin::Info Nios2TargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
-#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
-#include "clang/Basic/BuiltinsNios2.def"
-};
-
-bool Nios2TargetInfo::isFeatureSupportedByCPU(StringRef Feature,
- StringRef CPU) const {
- const bool isR2 = CPU == "nios2r2";
- return llvm::StringSwitch<bool>(Feature)
- .Case("nios2r2mandatory", isR2)
- .Case("nios2r2bmx", isR2)
- .Case("nios2r2mpx", isR2)
- .Case("nios2r2cdx", isR2)
- .Default(false);
-}
-
-void Nios2TargetInfo::getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
- DefineStd(Builder, "nios2", Opts);
- DefineStd(Builder, "NIOS2", Opts);
-
- Builder.defineMacro("__nios2");
- Builder.defineMacro("__NIOS2");
- Builder.defineMacro("__nios2__");
- Builder.defineMacro("__NIOS2__");
-}
-
-ArrayRef<Builtin::Info> Nios2TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::Nios2::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
-}
diff --git a/lib/Basic/Targets/Nios2.h b/lib/Basic/Targets/Nios2.h
deleted file mode 100644
index ffeb414d4778..000000000000
--- a/lib/Basic/Targets/Nios2.h
+++ /dev/null
@@ -1,151 +0,0 @@
-//===--- Nios2.h - Declare Nios2 target feature support ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares Nios2 TargetInfo objects.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_NIOS2_H
-#define LLVM_CLANG_LIB_BASIC_TARGETS_NIOS2_H
-
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/Compiler.h"
-
-namespace clang {
-namespace targets {
-
-class LLVM_LIBRARY_VISIBILITY Nios2TargetInfo : public TargetInfo {
- void setDataLayout() {
- if (BigEndian)
- resetDataLayout("E-p:32:32:32-i8:8:32-i16:16:32-n32");
- else
- resetDataLayout("e-p:32:32:32-i8:8:32-i16:16:32-n32");
- }
-
- static const Builtin::Info BuiltinInfo[];
- std::string CPU;
- std::string ABI;
-
-public:
- Nios2TargetInfo(const llvm::Triple &triple, const TargetOptions &opts)
- : TargetInfo(triple), CPU(opts.CPU), ABI(opts.ABI) {
- SizeType = UnsignedInt;
- PtrDiffType = SignedInt;
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
- setDataLayout();
- }
-
- StringRef getABI() const override { return ABI; }
- bool setABI(const std::string &Name) override {
- if (Name == "o32" || Name == "eabi") {
- ABI = Name;
- return true;
- }
- return false;
- }
-
- bool isValidCPUName(StringRef Name) const override {
- return Name == "nios2r1" || Name == "nios2r2";
- }
-
- void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
- Values.append({"nios2r1", "nios2r2"});
- }
-
- bool setCPU(const std::string &Name) override {
- if (isValidCPUName(Name)) {
- CPU = Name;
- return true;
- }
- return false;
- }
-
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override;
-
- ArrayRef<Builtin::Info> getTargetBuiltins() const override;
-
- bool isFeatureSupportedByCPU(StringRef Feature, StringRef CPU) const;
-
- bool
- initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
- StringRef CPU,
- const std::vector<std::string> &FeatureVec) const override {
- static const char *allFeatures[] = {"nios2r2mandatory", "nios2r2bmx",
- "nios2r2mpx", "nios2r2cdx"
- };
- for (const char *feature : allFeatures) {
- Features[feature] = isFeatureSupportedByCPU(feature, CPU);
- }
- return true;
- }
-
- bool hasFeature(StringRef Feature) const override {
- return isFeatureSupportedByCPU(Feature, CPU);
- }
-
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::VoidPtrBuiltinVaList;
- }
-
- ArrayRef<const char *> getGCCRegNames() const override {
- static const char *const GCCRegNames[] = {
- // CPU register names
- // Must match second column of GCCRegAliases
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
- "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20",
- "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30",
- "r31",
- // Floating point register names
- "ctl0", "ctl1", "ctl2", "ctl3", "ctl4", "ctl5", "ctl6", "ctl7", "ctl8",
- "ctl9", "ctl10", "ctl11", "ctl12", "ctl13", "ctl14", "ctl15"
- };
- return llvm::makeArrayRef(GCCRegNames);
- }
-
- bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &Info) const override {
- switch (*Name) {
- default:
- return false;
-
- case 'r': // CPU registers.
- case 'd': // Equivalent to "r" unless generating MIPS16 code.
- case 'y': // Equivalent to "r", backwards compatibility only.
- case 'f': // floating-point registers.
- case 'c': // $25 for indirect jumps
- case 'l': // lo register
- case 'x': // hilo register pair
- Info.setAllowsRegister();
- return true;
- }
- }
-
- const char *getClobbers() const override { return ""; }
-
- ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- static const TargetInfo::GCCRegAlias aliases[] = {
- {{"zero"}, "r0"}, {{"at"}, "r1"}, {{"et"}, "r24"},
- {{"bt"}, "r25"}, {{"gp"}, "r26"}, {{"sp"}, "r27"},
- {{"fp"}, "r28"}, {{"ea"}, "r29"}, {{"ba"}, "r30"},
- {{"ra"}, "r31"}, {{"status"}, "ctl0"}, {{"estatus"}, "ctl1"},
- {{"bstatus"}, "ctl2"}, {{"ienable"}, "ctl3"}, {{"ipending"}, "ctl4"},
- {{"cpuid"}, "ctl5"}, {{"exception"}, "ctl7"}, {{"pteaddr"}, "ctl8"},
- {{"tlbacc"}, "ctl9"}, {{"tlbmisc"}, "ctl10"}, {{"badaddr"}, "ctl12"},
- {{"config"}, "ctl13"}, {{"mpubase"}, "ctl14"}, {{"mpuacc"}, "ctl15"},
- };
- return llvm::makeArrayRef(aliases);
- }
-};
-
-} // namespace targets
-} // namespace clang
-#endif // LLVM_CLANG_LIB_BASIC_TARGETS_NIOS2_H
diff --git a/lib/Basic/Targets/OSTargets.cpp b/lib/Basic/Targets/OSTargets.cpp
index 50abd4ce0c8c..6252a51ef710 100644
--- a/lib/Basic/Targets/OSTargets.cpp
+++ b/lib/Basic/Targets/OSTargets.cpp
@@ -33,7 +33,7 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("_FORTIFY_SOURCE", "0");
// Darwin defines __weak, __strong, and __unsafe_unretained even in C mode.
- if (!Opts.ObjC1) {
+ if (!Opts.ObjC) {
// __weak is always defined, for use in blocks and with objc pointers.
Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
Builder.defineMacro("__strong", "");
diff --git a/lib/Basic/Targets/OSTargets.h b/lib/Basic/Targets/OSTargets.h
index d0354784acf9..085efa02cc5f 100644
--- a/lib/Basic/Targets/OSTargets.h
+++ b/lib/Basic/Targets/OSTargets.h
@@ -133,6 +133,15 @@ public:
/// is very similar to ELF's "protected"; Darwin requires a "weak"
/// attribute on declarations that can be dynamically replaced.
bool hasProtectedVisibility() const override { return false; }
+
+ TargetInfo::IntType getLeastIntTypeByWidth(unsigned BitWidth,
+ bool IsSigned) const final {
+ // Darwin uses `long long` for `int_least64_t` and `int_fast64_t`.
+ return BitWidth == 64
+ ? (IsSigned ? TargetInfo::SignedLongLong
+ : TargetInfo::UnsignedLongLong)
+ : TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
+ }
};
// DragonFlyBSD Target
@@ -257,6 +266,8 @@ protected:
Builder.defineMacro("__HAIKU__");
Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
@@ -267,7 +278,38 @@ public:
this->PtrDiffType = TargetInfo::SignedLong;
this->ProcessIDType = TargetInfo::SignedLong;
this->TLSSupported = false;
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
+ }
+};
+
+// Hurd target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY HurdTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Hurd defines; list based off of gcc output.
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__GNU__");
+ Builder.defineMacro("__gnu_hurd__");
+ Builder.defineMacro("__MACH__");
+ Builder.defineMacro("__GLIBC__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
}
+public:
+ HurdTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {}
};
// Minix Target
@@ -303,7 +345,6 @@ protected:
// Linux defines; list based off of gcc output
DefineStd(Builder, "unix", Opts);
DefineStd(Builder, "linux", Opts);
- Builder.defineMacro("__gnu_linux__");
Builder.defineMacro("__ELF__");
if (Triple.isAndroid()) {
Builder.defineMacro("__ANDROID__", "1");
@@ -313,6 +354,8 @@ protected:
this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
if (Maj)
Builder.defineMacro("__ANDROID_API__", Twine(Maj));
+ } else {
+ Builder.defineMacro("__gnu_linux__");
}
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
@@ -341,7 +384,6 @@ public:
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- case llvm::Triple::systemz:
this->HasFloat128 = true;
break;
}
@@ -397,7 +439,7 @@ public:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
this->HasFloat128 = true;
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
default:
this->MCountName = "__mcount";
break;
@@ -643,6 +685,7 @@ public:
WindowsTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedShort;
+ this->WIntType = TargetInfo::UnsignedShort;
}
};
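
As a minimal sketch of the Linux hunk above (a hypothetical standalone translation unit, not part of the patch): __gnu_linux__ is now defined only for non-Android Linux targets, while Android keeps __ANDROID__ and, when a version is known, __ANDROID_API__.

    // Hypothetical preprocessor probe for the macro split above; compiles as an
    // empty TU when the target defines the expected combination.
    #if defined(__ANDROID__)
    #  if defined(__gnu_linux__)
    #    error "Android targets no longer define __gnu_linux__"
    #  endif
    #elif defined(__linux__)
    #  if !defined(__gnu_linux__)
    #    error "non-Android Linux targets still define __gnu_linux__"
    #  endif
    #endif
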
diff --git a/lib/Basic/Targets/PPC.cpp b/lib/Basic/Targets/PPC.cpp
index b4eb3b1b97b7..6cfbed1713e1 100644
--- a/lib/Basic/Targets/PPC.cpp
+++ b/lib/Basic/Targets/PPC.cpp
@@ -83,8 +83,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (getTriple().getArch() == llvm::Triple::ppc64le) {
Builder.defineMacro("_LITTLE_ENDIAN");
} else {
- if (getTriple().getOS() != llvm::Triple::NetBSD &&
- getTriple().getOS() != llvm::Triple::OpenBSD)
+ if (!getTriple().isOSNetBSD() &&
+ !getTriple().isOSOpenBSD())
Builder.defineMacro("_BIG_ENDIAN");
}
@@ -412,6 +412,36 @@ ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
return llvm::makeArrayRef(GCCRegAliases);
}
+// PPC ELFv2 ABI DWARF definition, "Table 2.26. Mappings of Common Registers":
+// vs0 ~ vs31 map to 32 - 63,
+// vs32 ~ vs63 map to 77 - 108.
+const TargetInfo::AddlRegName GCCAddlRegNames[] = {
+ // Table of additional register names to use in user input.
+ {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
+ {{"vs4"}, 36}, {{"vs5"}, 37}, {{"vs6"}, 38}, {{"vs7"}, 39},
+ {{"vs8"}, 40}, {{"vs9"}, 41}, {{"vs10"}, 42}, {{"vs11"}, 43},
+ {{"vs12"}, 44}, {{"vs13"}, 45}, {{"vs14"}, 46}, {{"vs15"}, 47},
+ {{"vs16"}, 48}, {{"vs17"}, 49}, {{"vs18"}, 50}, {{"vs19"}, 51},
+ {{"vs20"}, 52}, {{"vs21"}, 53}, {{"vs22"}, 54}, {{"vs23"}, 55},
+ {{"vs24"}, 56}, {{"vs25"}, 57}, {{"vs26"}, 58}, {{"vs27"}, 59},
+ {{"vs28"}, 60}, {{"vs29"}, 61}, {{"vs30"}, 62}, {{"vs31"}, 63},
+ {{"vs32"}, 77}, {{"vs33"}, 78}, {{"vs34"}, 79}, {{"vs35"}, 80},
+ {{"vs36"}, 81}, {{"vs37"}, 82}, {{"vs38"}, 83}, {{"vs39"}, 84},
+ {{"vs40"}, 85}, {{"vs41"}, 86}, {{"vs42"}, 87}, {{"vs43"}, 88},
+ {{"vs44"}, 89}, {{"vs45"}, 90}, {{"vs46"}, 91}, {{"vs47"}, 92},
+ {{"vs48"}, 93}, {{"vs49"}, 94}, {{"vs50"}, 95}, {{"vs51"}, 96},
+ {{"vs52"}, 97}, {{"vs53"}, 98}, {{"vs54"}, 99}, {{"vs55"}, 100},
+ {{"vs56"}, 101}, {{"vs57"}, 102}, {{"vs58"}, 103}, {{"vs59"}, 104},
+ {{"vs60"}, 105}, {{"vs61"}, 106}, {{"vs62"}, 107}, {{"vs63"}, 108},
+};
+
+ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
+ if (ABI == "elfv2")
+ return llvm::makeArrayRef(GCCAddlRegNames);
+ else
+ return TargetInfo::getGCCAddlRegNames();
+}
+
static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
{"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
diff --git a/lib/Basic/Targets/PPC.h b/lib/Basic/Targets/PPC.h
index 439c73a0e326..058970a0e098 100644
--- a/lib/Basic/Targets/PPC.h
+++ b/lib/Basic/Targets/PPC.h
@@ -176,6 +176,8 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
+
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
switch (*Name) {
@@ -201,6 +203,7 @@ public:
case 's': // VSX vector register to hold scalar float data
case 'a': // Any VSX register
case 'c': // An individual CR bit
+ case 'i': // FP or VSX register to hold 64-bit integers data
break;
default:
return false;
@@ -328,7 +331,7 @@ public:
break;
}
- if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ if (getTriple().isOSFreeBSD()) {
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
diff --git a/lib/Basic/Targets/Sparc.h b/lib/Basic/Targets/Sparc.h
index af2189f21468..5ae305bffb43 100644
--- a/lib/Basic/Targets/Sparc.h
+++ b/lib/Basic/Targets/Sparc.h
@@ -199,7 +199,7 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
// OpenBSD uses long long for int64_t and intmax_t.
- if (getTriple().getOS() == llvm::Triple::OpenBSD)
+ if (getTriple().isOSOpenBSD())
IntMaxType = SignedLongLong;
else
IntMaxType = SignedLong;
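
A hedged illustration of why the OpenBSD special case above matters even though long and long long are both 64-bit on sparc64 (hypothetical user code, not part of the patch): format macros and mangling only line up if IntMaxType matches what the OS headers declare.

    // Illustrative only: PRIdMAX expands to "%lld" when intmax_t is long long and
    // to "%ld" when it is long; keeping IntMaxType in sync with the system headers
    // avoids -Wformat warnings and ABI mismatches.
    #include <cinttypes>
    #include <cstdio>

    void printMax(intmax_t V) {
      std::printf("%" PRIdMAX "\n", V);
    }
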
diff --git a/lib/Basic/Targets/WebAssembly.cpp b/lib/Basic/Targets/WebAssembly.cpp
index b8a2a092aff4..2fdc84bb8cc8 100644
--- a/lib/Basic/Targets/WebAssembly.cpp
+++ b/lib/Basic/Targets/WebAssembly.cpp
@@ -24,6 +24,8 @@ using namespace clang::targets;
const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsWebAssembly.def"
@@ -35,6 +37,7 @@ static constexpr llvm::StringLiteral ValidCPUNames[] = {
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
+ .Case("unimplemented-simd128", SIMDLevel >= UnimplementedSIMD128)
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
.Case("sign-ext", HasSignExt)
.Case("exception-handling", HasExceptionHandling)
@@ -55,6 +58,44 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
if (SIMDLevel >= SIMD128)
Builder.defineMacro("__wasm_simd128__");
+ if (SIMDLevel >= UnimplementedSIMD128)
+ Builder.defineMacro("__wasm_unimplemented_simd128__");
+}
+
+void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
+ SIMDEnum Level) {
+ switch (Level) {
+ case UnimplementedSIMD128:
+ Features["unimplemented-simd128"] = true;
+ LLVM_FALLTHROUGH;
+ case SIMD128:
+ Features["simd128"] = true;
+ LLVM_FALLTHROUGH;
+ case NoSIMD:
+ break;
+ }
+}
+
+bool WebAssemblyTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ if (CPU == "bleeding-edge") {
+ Features["nontrapping-fptoint"] = true;
+ Features["sign-ext"] = true;
+ setSIMDLevel(Features, SIMD128);
+ }
+ // Other targets do not consider user-configured features here, but while we
+ // are actively developing new features it is useful to let user-configured
+ // features control availability of builtins
+ setSIMDLevel(Features, SIMDLevel);
+ if (HasNontrappingFPToInt)
+ Features["nontrapping-fptoint"] = true;
+ if (HasSignExt)
+ Features["sign-ext"] = true;
+ if (HasExceptionHandling)
+ Features["exception-handling"] = true;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
bool WebAssemblyTargetInfo::handleTargetFeatures(
@@ -68,6 +109,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
continue;
}
+ if (Feature == "+unimplemented-simd128") {
+ SIMDLevel = std::max(SIMDLevel, SIMDEnum(UnimplementedSIMD128));
+ continue;
+ }
+ if (Feature == "-unimplemented-simd128") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(UnimplementedSIMD128 - 1));
+ continue;
+ }
if (Feature == "+nontrapping-fptoint") {
HasNontrappingFPToInt = true;
continue;
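
A minimal standalone sketch (simplified types, C++17) of the cascading behavior implemented by setSIMDLevel above: each level enables its own feature flag and falls through to enable every lower level as well.

    #include <cassert>
    #include <map>
    #include <string>

    enum SIMDEnum { NoSIMD, SIMD128, UnimplementedSIMD128 };

    // Mirrors the fallthrough structure of WebAssemblyTargetInfo::setSIMDLevel.
    static void setSIMDLevel(std::map<std::string, bool> &Features, SIMDEnum Level) {
      switch (Level) {
      case UnimplementedSIMD128:
        Features["unimplemented-simd128"] = true;
        [[fallthrough]];
      case SIMD128:
        Features["simd128"] = true;
        [[fallthrough]];
      case NoSIMD:
        break;
      }
    }

    int main() {
      std::map<std::string, bool> Features;
      setSIMDLevel(Features, UnimplementedSIMD128);
      assert(Features["simd128"] && Features["unimplemented-simd128"]);
    }
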
diff --git a/lib/Basic/Targets/WebAssembly.h b/lib/Basic/Targets/WebAssembly.h
index c04c5cb6fb3a..3dea9a373cb4 100644
--- a/lib/Basic/Targets/WebAssembly.h
+++ b/lib/Basic/Targets/WebAssembly.h
@@ -28,7 +28,8 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
enum SIMDEnum {
NoSIMD,
SIMD128,
- } SIMDLevel;
+ UnimplementedSIMD128,
+ } SIMDLevel = NoSIMD;
bool HasNontrappingFPToInt;
bool HasSignExt;
@@ -59,18 +60,12 @@ protected:
MacroBuilder &Builder) const override;
private:
+ static void setSIMDLevel(llvm::StringMap<bool> &Features, SIMDEnum Level);
+
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
- const std::vector<std::string> &FeaturesVec) const override {
- if (CPU == "bleeding-edge") {
- Features["simd128"] = true;
- Features["nontrapping-fptoint"] = true;
- Features["sign-ext"] = true;
- }
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
- }
-
+ const std::vector<std::string> &FeaturesVec) const override;
bool hasFeature(StringRef Feature) const final;
bool handleTargetFeatures(std::vector<std::string> &Features,
diff --git a/lib/Basic/Targets/X86.cpp b/lib/Basic/Targets/X86.cpp
index e295cff9d5d2..53b4c153e952 100644
--- a/lib/Basic/Targets/X86.cpp
+++ b/lib/Basic/Targets/X86.cpp
@@ -142,7 +142,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "gfni", true);
setFeatureEnabledImpl(Features, "vpclmulqdq", true);
setFeatureEnabledImpl(Features, "avx512bitalg", true);
- setFeatureEnabledImpl(Features, "avx512vnni", true);
setFeatureEnabledImpl(Features, "avx512vbmi2", true);
setFeatureEnabledImpl(Features, "avx512vpopcntdq", true);
setFeatureEnabledImpl(Features, "rdpid", true);
@@ -152,6 +151,12 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512vbmi", true);
setFeatureEnabledImpl(Features, "sha", true);
LLVM_FALLTHROUGH;
+ case CK_Cascadelake:
+    // Cannonlake has no VNNI feature while Icelake does
+ if (Kind != CK_Cannonlake)
+      // CLX inherits all SKX features plus AVX512_VNNI
+ setFeatureEnabledImpl(Features, "avx512vnni", true);
+ LLVM_FALLTHROUGH;
case CK_SkylakeServer:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
@@ -166,10 +171,12 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
- if (Kind != CK_SkylakeServer) // SKX inherits all SKL features, except SGX
+ if (Kind != CK_SkylakeServer
+ && Kind != CK_Cascadelake)
+ // SKX/CLX inherits all SKL features, except SGX
setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
- setFeatureEnabledImpl(Features, "rtm", true);
+ setFeatureEnabledImpl(Features, "aes", true);
LLVM_FALLTHROUGH;
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
@@ -196,7 +203,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "xsaveopt", true);
LLVM_FALLTHROUGH;
case CK_Westmere:
- setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
LLVM_FALLTHROUGH;
case CK_Nehalem:
@@ -248,10 +254,10 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "clflushopt", true);
setFeatureEnabledImpl(Features, "mpx", true);
setFeatureEnabledImpl(Features, "fsgsbase", true);
+ setFeatureEnabledImpl(Features, "aes", true);
LLVM_FALLTHROUGH;
case CK_Silvermont:
setFeatureEnabledImpl(Features, "rdrnd", true);
- setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
setFeatureEnabledImpl(Features, "sse4.2", true);
setFeatureEnabledImpl(Features, "prfchw", true);
@@ -281,7 +287,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "f16c", true);
@@ -796,8 +801,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCLDEMOTE = true;
} else if (Feature == "+rdpid") {
HasRDPID = true;
- } else if (Feature == "+retpoline") {
- HasRetpoline = true;
} else if (Feature == "+retpoline-external-thunk") {
HasRetpolineExternalThunk = true;
} else if (Feature == "+sahf") {
@@ -862,6 +865,11 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
/// definitions for this particular subtarget.
void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ std::string CodeModel = getTargetOpts().CodeModel;
+ if (CodeModel == "default")
+ CodeModel = "small";
+ Builder.defineMacro("__code_model_" + CodeModel + "_");
+
// Target identification.
if (getTriple().getArch() == llvm::Triple::x86_64) {
Builder.defineMacro("__amd64__");
@@ -948,6 +956,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Broadwell:
case CK_SkylakeClient:
case CK_SkylakeServer:
+ case CK_Cascadelake:
case CK_Cannonlake:
case CK_IcelakeClient:
case CK_IcelakeServer:
@@ -1083,6 +1092,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasMWAITX)
Builder.defineMacro("__MWAITX__");
+ if (HasMOVBE)
+ Builder.defineMacro("__MOVBE__");
+
switch (XOPLevel) {
case XOP:
Builder.defineMacro("__XOP__");
@@ -1397,7 +1409,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("rdpid", HasRDPID)
.Case("rdrnd", HasRDRND)
.Case("rdseed", HasRDSEED)
- .Case("retpoline", HasRetpoline)
.Case("retpoline-external-thunk", HasRetpolineExternalThunk)
.Case("rtm", HasRTM)
.Case("sahf", HasLAHFSAHF)
@@ -1678,6 +1689,7 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
return false;
break;
}
+ LLVM_FALLTHROUGH;
case 'v':
case 'x':
if (SSELevel >= AVX512F)
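
A hedged illustration of the new code-model macro defined above (hypothetical user code; only the small and large spellings are shown, other models follow the same pattern): with no -mcmodel option the "default" value is normalized to small, so __code_model_small_ is what user code observes.

    // Illustrative only: probe the macro spelled "__code_model_" + model + "_".
    #if defined(__code_model_large_)
    static const char *CodeModelName = "large";
    #elif defined(__code_model_small_)
    static const char *CodeModelName = "small"; // also the normalized "default"
    #else
    static const char *CodeModelName = "unknown";
    #endif
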
diff --git a/lib/Basic/Targets/X86.h b/lib/Basic/Targets/X86.h
index 019bc8d51a63..05930ae9eec0 100644
--- a/lib/Basic/Targets/X86.h
+++ b/lib/Basic/Targets/X86.h
@@ -98,7 +98,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasMOVBE = false;
bool HasPREFETCHWT1 = false;
bool HasRDPID = false;
- bool HasRetpoline = false;
bool HasRetpolineExternalThunk = false;
bool HasLAHFSAHF = false;
bool HasWBNOINVD = false;
@@ -226,6 +225,7 @@ public:
case 'Y':
if ((++I != E) && ((*I == '0') || (*I == 'z')))
return "xmm0";
+ break;
default:
break;
}
@@ -291,9 +291,6 @@ public:
return checkCPUKind(CPU = getCPUKind(Name));
}
- bool supportsMultiVersioning() const override {
- return getTriple().isOSBinFormatELF();
- }
unsigned multiVersionSortPriority(StringRef Name) const override;
bool setFPMath(StringRef Name) override;
@@ -350,11 +347,9 @@ public:
(1 << TargetInfo::LongDouble));
// x86-32 has atomics up to 8 bytes
- CPUKind Kind = getCPUKind(Opts.CPU);
- if (Kind >= CK_i586 || Kind == CK_Generic)
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
- else if (Kind >= CK_i486)
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ // FIXME: Check that we actually have cmpxchg8b before setting
+ // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
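
A hedged illustration of what the widened x86-32 atomic width above means for users (hypothetical probe, not part of the patch): 8-byte atomics are now reported as inlineable regardless of the -march CPU, subject to the cmpxchg8b FIXME noted in the hunk.

    #include <atomic>

    // Illustrative only: with MaxAtomicInlineWidth == 64 on 32-bit x86, an 8-byte
    // atomic is expected to be lock-free rather than routed through libatomic.
    bool isEightByteAtomicLockFree(std::atomic<long long> &V) {
      return V.is_lock_free();
    }
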
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
deleted file mode 100644
index bcfcbdbb9014..000000000000
--- a/lib/Basic/VirtualFileSystem.cpp
+++ /dev/null
@@ -1,2026 +0,0 @@
-//===- VirtualFileSystem.cpp - Virtual File System Layer ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the VirtualFileSystem interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Config/llvm-config.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Chrono.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Errc.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/Process.h"
-#include "llvm/Support/SMLoc.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/YAMLParser.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <iterator>
-#include <limits>
-#include <map>
-#include <memory>
-#include <string>
-#include <system_error>
-#include <utility>
-#include <vector>
-
-using namespace clang;
-using namespace vfs;
-using namespace llvm;
-
-using llvm::sys::fs::file_status;
-using llvm::sys::fs::file_type;
-using llvm::sys::fs::perms;
-using llvm::sys::fs::UniqueID;
-
-Status::Status(const file_status &Status)
- : UID(Status.getUniqueID()), MTime(Status.getLastModificationTime()),
- User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
- Type(Status.type()), Perms(Status.permissions()) {}
-
-Status::Status(StringRef Name, UniqueID UID, sys::TimePoint<> MTime,
- uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
- perms Perms)
- : Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
- Type(Type), Perms(Perms) {}
-
-Status Status::copyWithNewName(const Status &In, StringRef NewName) {
- return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
- In.getUser(), In.getGroup(), In.getSize(), In.getType(),
- In.getPermissions());
-}
-
-Status Status::copyWithNewName(const file_status &In, StringRef NewName) {
- return Status(NewName, In.getUniqueID(), In.getLastModificationTime(),
- In.getUser(), In.getGroup(), In.getSize(), In.type(),
- In.permissions());
-}
-
-bool Status::equivalent(const Status &Other) const {
- assert(isStatusKnown() && Other.isStatusKnown());
- return getUniqueID() == Other.getUniqueID();
-}
-
-bool Status::isDirectory() const {
- return Type == file_type::directory_file;
-}
-
-bool Status::isRegularFile() const {
- return Type == file_type::regular_file;
-}
-
-bool Status::isOther() const {
- return exists() && !isRegularFile() && !isDirectory() && !isSymlink();
-}
-
-bool Status::isSymlink() const {
- return Type == file_type::symlink_file;
-}
-
-bool Status::isStatusKnown() const {
- return Type != file_type::status_error;
-}
-
-bool Status::exists() const {
- return isStatusKnown() && Type != file_type::file_not_found;
-}
-
-File::~File() = default;
-
-FileSystem::~FileSystem() = default;
-
-ErrorOr<std::unique_ptr<MemoryBuffer>>
-FileSystem::getBufferForFile(const llvm::Twine &Name, int64_t FileSize,
- bool RequiresNullTerminator, bool IsVolatile) {
- auto F = openFileForRead(Name);
- if (!F)
- return F.getError();
-
- return (*F)->getBuffer(Name, FileSize, RequiresNullTerminator, IsVolatile);
-}
-
-std::error_code FileSystem::makeAbsolute(SmallVectorImpl<char> &Path) const {
- if (llvm::sys::path::is_absolute(Path))
- return {};
-
- auto WorkingDir = getCurrentWorkingDirectory();
- if (!WorkingDir)
- return WorkingDir.getError();
-
- return llvm::sys::fs::make_absolute(WorkingDir.get(), Path);
-}
-
-std::error_code FileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- return errc::operation_not_permitted;
-}
-
-bool FileSystem::exists(const Twine &Path) {
- auto Status = status(Path);
- return Status && Status->exists();
-}
-
-#ifndef NDEBUG
-static bool isTraversalComponent(StringRef Component) {
- return Component.equals("..") || Component.equals(".");
-}
-
-static bool pathHasTraversal(StringRef Path) {
- using namespace llvm::sys;
-
- for (StringRef Comp : llvm::make_range(path::begin(Path), path::end(Path)))
- if (isTraversalComponent(Comp))
- return true;
- return false;
-}
-#endif
-
-//===-----------------------------------------------------------------------===/
-// RealFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-namespace {
-
-/// Wrapper around a raw file descriptor.
-class RealFile : public File {
- friend class RealFileSystem;
-
- int FD;
- Status S;
- std::string RealName;
-
- RealFile(int FD, StringRef NewName, StringRef NewRealPathName)
- : FD(FD), S(NewName, {}, {}, {}, {}, {},
- llvm::sys::fs::file_type::status_error, {}),
- RealName(NewRealPathName.str()) {
- assert(FD >= 0 && "Invalid or inactive file descriptor");
- }
-
-public:
- ~RealFile() override;
-
- ErrorOr<Status> status() override;
- ErrorOr<std::string> getName() override;
- ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name,
- int64_t FileSize,
- bool RequiresNullTerminator,
- bool IsVolatile) override;
- std::error_code close() override;
-};
-
-} // namespace
-
-RealFile::~RealFile() { close(); }
-
-ErrorOr<Status> RealFile::status() {
- assert(FD != -1 && "cannot stat closed file");
- if (!S.isStatusKnown()) {
- file_status RealStatus;
- if (std::error_code EC = sys::fs::status(FD, RealStatus))
- return EC;
- S = Status::copyWithNewName(RealStatus, S.getName());
- }
- return S;
-}
-
-ErrorOr<std::string> RealFile::getName() {
- return RealName.empty() ? S.getName().str() : RealName;
-}
-
-ErrorOr<std::unique_ptr<MemoryBuffer>>
-RealFile::getBuffer(const Twine &Name, int64_t FileSize,
- bool RequiresNullTerminator, bool IsVolatile) {
- assert(FD != -1 && "cannot get buffer for closed file");
- return MemoryBuffer::getOpenFile(FD, Name, FileSize, RequiresNullTerminator,
- IsVolatile);
-}
-
-std::error_code RealFile::close() {
- std::error_code EC = sys::Process::SafelyCloseFileDescriptor(FD);
- FD = -1;
- return EC;
-}
-
-namespace {
-
-/// The file system according to your operating system.
-class RealFileSystem : public FileSystem {
-public:
- ErrorOr<Status> status(const Twine &Path) override;
- ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
- directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
-
- llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
- std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
- std::error_code getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const override;
-};
-
-} // namespace
-
-ErrorOr<Status> RealFileSystem::status(const Twine &Path) {
- sys::fs::file_status RealStatus;
- if (std::error_code EC = sys::fs::status(Path, RealStatus))
- return EC;
- return Status::copyWithNewName(RealStatus, Path.str());
-}
-
-ErrorOr<std::unique_ptr<File>>
-RealFileSystem::openFileForRead(const Twine &Name) {
- int FD;
- SmallString<256> RealName;
- if (std::error_code EC =
- sys::fs::openFileForRead(Name, FD, sys::fs::OF_None, &RealName))
- return EC;
- return std::unique_ptr<File>(new RealFile(FD, Name.str(), RealName.str()));
-}
-
-llvm::ErrorOr<std::string> RealFileSystem::getCurrentWorkingDirectory() const {
- SmallString<256> Dir;
- if (std::error_code EC = llvm::sys::fs::current_path(Dir))
- return EC;
- return Dir.str().str();
-}
-
-std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
- // FIXME: chdir is thread hostile; on the other hand, creating the same
- // behavior as chdir is complex: chdir resolves the path once, thus
- // guaranteeing that all subsequent relative path operations work
- // on the same path the original chdir resulted in. This makes a
- // difference for example on network filesystems, where symlinks might be
- // switched during runtime of the tool. Fixing this depends on having a
- // file system abstraction that allows openat() style interactions.
- return llvm::sys::fs::set_current_path(Path);
-}
-
-std::error_code
-RealFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- return llvm::sys::fs::real_path(Path, Output);
-}
-
-IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
- static IntrusiveRefCntPtr<FileSystem> FS = new RealFileSystem();
- return FS;
-}
-
-namespace {
-
-class RealFSDirIter : public clang::vfs::detail::DirIterImpl {
- llvm::sys::fs::directory_iterator Iter;
-
-public:
- RealFSDirIter(const Twine &Path, std::error_code &EC) : Iter(Path, EC) {
- if (Iter != llvm::sys::fs::directory_iterator()) {
- llvm::sys::fs::file_status S;
- std::error_code ErrorCode = llvm::sys::fs::status(Iter->path(), S, true);
- CurrentEntry = Status::copyWithNewName(S, Iter->path());
- if (!EC)
- EC = ErrorCode;
- }
- }
-
- std::error_code increment() override {
- std::error_code EC;
- Iter.increment(EC);
- if (Iter == llvm::sys::fs::directory_iterator()) {
- CurrentEntry = Status();
- } else {
- llvm::sys::fs::file_status S;
- std::error_code ErrorCode = llvm::sys::fs::status(Iter->path(), S, true);
- CurrentEntry = Status::copyWithNewName(S, Iter->path());
- if (!EC)
- EC = ErrorCode;
- }
- return EC;
- }
-};
-
-} // namespace
-
-directory_iterator RealFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- return directory_iterator(std::make_shared<RealFSDirIter>(Dir, EC));
-}
-
-//===-----------------------------------------------------------------------===/
-// OverlayFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-OverlayFileSystem::OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> BaseFS) {
- FSList.push_back(std::move(BaseFS));
-}
-
-void OverlayFileSystem::pushOverlay(IntrusiveRefCntPtr<FileSystem> FS) {
- FSList.push_back(FS);
- // Synchronize added file systems by duplicating the working directory from
- // the first one in the list.
- FS->setCurrentWorkingDirectory(getCurrentWorkingDirectory().get());
-}
-
-ErrorOr<Status> OverlayFileSystem::status(const Twine &Path) {
- // FIXME: handle symlinks that cross file systems
- for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
- ErrorOr<Status> Status = (*I)->status(Path);
- if (Status || Status.getError() != llvm::errc::no_such_file_or_directory)
- return Status;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-ErrorOr<std::unique_ptr<File>>
-OverlayFileSystem::openFileForRead(const llvm::Twine &Path) {
- // FIXME: handle symlinks that cross file systems
- for (iterator I = overlays_begin(), E = overlays_end(); I != E; ++I) {
- auto Result = (*I)->openFileForRead(Path);
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-llvm::ErrorOr<std::string>
-OverlayFileSystem::getCurrentWorkingDirectory() const {
- // All file systems are synchronized, just take the first working directory.
- return FSList.front()->getCurrentWorkingDirectory();
-}
-
-std::error_code
-OverlayFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
- for (auto &FS : FSList)
- if (std::error_code EC = FS->setCurrentWorkingDirectory(Path))
- return EC;
- return {};
-}
-
-std::error_code
-OverlayFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- for (auto &FS : FSList)
- if (FS->exists(Path))
- return FS->getRealPath(Path, Output);
- return errc::no_such_file_or_directory;
-}
-
-clang::vfs::detail::DirIterImpl::~DirIterImpl() = default;
-
-namespace {
-
-class OverlayFSDirIterImpl : public clang::vfs::detail::DirIterImpl {
- OverlayFileSystem &Overlays;
- std::string Path;
- OverlayFileSystem::iterator CurrentFS;
- directory_iterator CurrentDirIter;
- llvm::StringSet<> SeenNames;
-
- std::error_code incrementFS() {
- assert(CurrentFS != Overlays.overlays_end() && "incrementing past end");
- ++CurrentFS;
- for (auto E = Overlays.overlays_end(); CurrentFS != E; ++CurrentFS) {
- std::error_code EC;
- CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
- if (EC && EC != errc::no_such_file_or_directory)
- return EC;
- if (CurrentDirIter != directory_iterator())
- break; // found
- }
- return {};
- }
-
- std::error_code incrementDirIter(bool IsFirstTime) {
- assert((IsFirstTime || CurrentDirIter != directory_iterator()) &&
- "incrementing past end");
- std::error_code EC;
- if (!IsFirstTime)
- CurrentDirIter.increment(EC);
- if (!EC && CurrentDirIter == directory_iterator())
- EC = incrementFS();
- return EC;
- }
-
- std::error_code incrementImpl(bool IsFirstTime) {
- while (true) {
- std::error_code EC = incrementDirIter(IsFirstTime);
- if (EC || CurrentDirIter == directory_iterator()) {
- CurrentEntry = Status();
- return EC;
- }
- CurrentEntry = *CurrentDirIter;
- StringRef Name = llvm::sys::path::filename(CurrentEntry.getName());
- if (SeenNames.insert(Name).second)
- return EC; // name not seen before
- }
- llvm_unreachable("returned above");
- }
-
-public:
- OverlayFSDirIterImpl(const Twine &Path, OverlayFileSystem &FS,
- std::error_code &EC)
- : Overlays(FS), Path(Path.str()), CurrentFS(Overlays.overlays_begin()) {
- CurrentDirIter = (*CurrentFS)->dir_begin(Path, EC);
- EC = incrementImpl(true);
- }
-
- std::error_code increment() override { return incrementImpl(false); }
-};
-
-} // namespace
-
-directory_iterator OverlayFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- return directory_iterator(
- std::make_shared<OverlayFSDirIterImpl>(Dir, *this, EC));
-}
-
-namespace clang {
-namespace vfs {
-
-namespace detail {
-
-enum InMemoryNodeKind { IME_File, IME_Directory };
-
-/// The in memory file system is a tree of Nodes. Every node can either be a
-/// file or a directory.
-class InMemoryNode {
- Status Stat;
- InMemoryNodeKind Kind;
-
-public:
- InMemoryNode(Status Stat, InMemoryNodeKind Kind)
- : Stat(std::move(Stat)), Kind(Kind) {}
- virtual ~InMemoryNode() = default;
-
- const Status &getStatus() const { return Stat; }
- InMemoryNodeKind getKind() const { return Kind; }
- virtual std::string toString(unsigned Indent) const = 0;
-};
-
-namespace {
-
-class InMemoryFile : public InMemoryNode {
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
-
-public:
- InMemoryFile(Status Stat, std::unique_ptr<llvm::MemoryBuffer> Buffer)
- : InMemoryNode(std::move(Stat), IME_File), Buffer(std::move(Buffer)) {}
-
- llvm::MemoryBuffer *getBuffer() { return Buffer.get(); }
-
- std::string toString(unsigned Indent) const override {
- return (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
- }
-
- static bool classof(const InMemoryNode *N) {
- return N->getKind() == IME_File;
- }
-};
-
-/// Adapt a InMemoryFile for VFS' File interface.
-class InMemoryFileAdaptor : public File {
- InMemoryFile &Node;
-
-public:
- explicit InMemoryFileAdaptor(InMemoryFile &Node) : Node(Node) {}
-
- llvm::ErrorOr<Status> status() override { return Node.getStatus(); }
-
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
- bool IsVolatile) override {
- llvm::MemoryBuffer *Buf = Node.getBuffer();
- return llvm::MemoryBuffer::getMemBuffer(
- Buf->getBuffer(), Buf->getBufferIdentifier(), RequiresNullTerminator);
- }
-
- std::error_code close() override { return {}; }
-};
-
-} // namespace
-
-class InMemoryDirectory : public InMemoryNode {
- std::map<std::string, std::unique_ptr<InMemoryNode>> Entries;
-
-public:
- InMemoryDirectory(Status Stat)
- : InMemoryNode(std::move(Stat), IME_Directory) {}
-
- InMemoryNode *getChild(StringRef Name) {
- auto I = Entries.find(Name);
- if (I != Entries.end())
- return I->second.get();
- return nullptr;
- }
-
- InMemoryNode *addChild(StringRef Name, std::unique_ptr<InMemoryNode> Child) {
- return Entries.insert(make_pair(Name, std::move(Child)))
- .first->second.get();
- }
-
- using const_iterator = decltype(Entries)::const_iterator;
-
- const_iterator begin() const { return Entries.begin(); }
- const_iterator end() const { return Entries.end(); }
-
- std::string toString(unsigned Indent) const override {
- std::string Result =
- (std::string(Indent, ' ') + getStatus().getName() + "\n").str();
- for (const auto &Entry : Entries)
- Result += Entry.second->toString(Indent + 2);
- return Result;
- }
-
- static bool classof(const InMemoryNode *N) {
- return N->getKind() == IME_Directory;
- }
-};
-
-} // namespace detail
-
-InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
- : Root(new detail::InMemoryDirectory(
- Status("", getNextVirtualUniqueID(), llvm::sys::TimePoint<>(), 0, 0,
- 0, llvm::sys::fs::file_type::directory_file,
- llvm::sys::fs::perms::all_all))),
- UseNormalizedPaths(UseNormalizedPaths) {}
-
-InMemoryFileSystem::~InMemoryFileSystem() = default;
-
-std::string InMemoryFileSystem::toString() const {
- return Root->toString(/*Indent=*/0);
-}
-
-bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
- std::unique_ptr<llvm::MemoryBuffer> Buffer,
- Optional<uint32_t> User,
- Optional<uint32_t> Group,
- Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (Path.empty())
- return false;
-
- detail::InMemoryDirectory *Dir = Root.get();
- auto I = llvm::sys::path::begin(Path), E = sys::path::end(Path);
- const auto ResolvedUser = User.getValueOr(0);
- const auto ResolvedGroup = Group.getValueOr(0);
- const auto ResolvedType = Type.getValueOr(sys::fs::file_type::regular_file);
- const auto ResolvedPerms = Perms.getValueOr(sys::fs::all_all);
- // Any intermediate directories we create should be accessible by
- // the owner, even if Perms says otherwise for the final path.
- const auto NewDirectoryPerms = ResolvedPerms | sys::fs::owner_all;
- while (true) {
- StringRef Name = *I;
- detail::InMemoryNode *Node = Dir->getChild(Name);
- ++I;
- if (!Node) {
- if (I == E) {
- // End of the path, create a new file or directory.
- Status Stat(P.str(), getNextVirtualUniqueID(),
- llvm::sys::toTimePoint(ModificationTime), ResolvedUser,
- ResolvedGroup, Buffer->getBufferSize(), ResolvedType,
- ResolvedPerms);
- std::unique_ptr<detail::InMemoryNode> Child;
- if (ResolvedType == sys::fs::file_type::directory_file) {
- Child.reset(new detail::InMemoryDirectory(std::move(Stat)));
- } else {
- Child.reset(new detail::InMemoryFile(std::move(Stat),
- std::move(Buffer)));
- }
- Dir->addChild(Name, std::move(Child));
- return true;
- }
-
- // Create a new directory. Use the path up to here.
- Status Stat(
- StringRef(Path.str().begin(), Name.end() - Path.str().begin()),
- getNextVirtualUniqueID(), llvm::sys::toTimePoint(ModificationTime),
- ResolvedUser, ResolvedGroup, Buffer->getBufferSize(),
- sys::fs::file_type::directory_file, NewDirectoryPerms);
- Dir = cast<detail::InMemoryDirectory>(Dir->addChild(
- Name, llvm::make_unique<detail::InMemoryDirectory>(std::move(Stat))));
- continue;
- }
-
- if (auto *NewDir = dyn_cast<detail::InMemoryDirectory>(Node)) {
- Dir = NewDir;
- } else {
- assert(isa<detail::InMemoryFile>(Node) &&
- "Must be either file or directory!");
-
- // Trying to insert a directory in place of a file.
- if (I != E)
- return false;
-
- // Return false only if the new file is different from the existing one.
- return cast<detail::InMemoryFile>(Node)->getBuffer()->getBuffer() ==
- Buffer->getBuffer();
- }
- }
-}
-
-bool InMemoryFileSystem::addFileNoOwn(const Twine &P, time_t ModificationTime,
- llvm::MemoryBuffer *Buffer,
- Optional<uint32_t> User,
- Optional<uint32_t> Group,
- Optional<llvm::sys::fs::file_type> Type,
- Optional<llvm::sys::fs::perms> Perms) {
- return addFile(P, ModificationTime,
- llvm::MemoryBuffer::getMemBuffer(
- Buffer->getBuffer(), Buffer->getBufferIdentifier()),
- std::move(User), std::move(Group), std::move(Type),
- std::move(Perms));
-}
-
-static ErrorOr<detail::InMemoryNode *>
-lookupInMemoryNode(const InMemoryFileSystem &FS, detail::InMemoryDirectory *Dir,
- const Twine &P) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = FS.makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (FS.useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (Path.empty())
- return Dir;
-
- auto I = llvm::sys::path::begin(Path), E = llvm::sys::path::end(Path);
- while (true) {
- detail::InMemoryNode *Node = Dir->getChild(*I);
- ++I;
- if (!Node)
- return errc::no_such_file_or_directory;
-
- // Return the file if it's at the end of the path.
- if (auto File = dyn_cast<detail::InMemoryFile>(Node)) {
- if (I == E)
- return File;
- return errc::no_such_file_or_directory;
- }
-
- // Traverse directories.
- Dir = cast<detail::InMemoryDirectory>(Node);
- if (I == E)
- return Dir;
- }
-}
-
-llvm::ErrorOr<Status> InMemoryFileSystem::status(const Twine &Path) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Path);
- if (Node)
- return (*Node)->getStatus();
- return Node.getError();
-}
-
-llvm::ErrorOr<std::unique_ptr<File>>
-InMemoryFileSystem::openFileForRead(const Twine &Path) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Path);
- if (!Node)
- return Node.getError();
-
- // When we have a file provide a heap-allocated wrapper for the memory buffer
- // to match the ownership semantics for File.
- if (auto *F = dyn_cast<detail::InMemoryFile>(*Node))
- return std::unique_ptr<File>(new detail::InMemoryFileAdaptor(*F));
-
- // FIXME: errc::not_a_file?
- return make_error_code(llvm::errc::invalid_argument);
-}
-
-namespace {
-
-/// Adaptor from InMemoryDir::iterator to directory_iterator.
-class InMemoryDirIterator : public clang::vfs::detail::DirIterImpl {
- detail::InMemoryDirectory::const_iterator I;
- detail::InMemoryDirectory::const_iterator E;
-
-public:
- InMemoryDirIterator() = default;
-
- explicit InMemoryDirIterator(detail::InMemoryDirectory &Dir)
- : I(Dir.begin()), E(Dir.end()) {
- if (I != E)
- CurrentEntry = I->second->getStatus();
- }
-
- std::error_code increment() override {
- ++I;
- // When we're at the end, make CurrentEntry invalid and DirIterImpl will do
- // the rest.
- CurrentEntry = I != E ? I->second->getStatus() : Status();
- return {};
- }
-};
-
-} // namespace
-
-directory_iterator InMemoryFileSystem::dir_begin(const Twine &Dir,
- std::error_code &EC) {
- auto Node = lookupInMemoryNode(*this, Root.get(), Dir);
- if (!Node) {
- EC = Node.getError();
- return directory_iterator(std::make_shared<InMemoryDirIterator>());
- }
-
- if (auto *DirNode = dyn_cast<detail::InMemoryDirectory>(*Node))
- return directory_iterator(std::make_shared<InMemoryDirIterator>(*DirNode));
-
- EC = make_error_code(llvm::errc::not_a_directory);
- return directory_iterator(std::make_shared<InMemoryDirIterator>());
-}
-
-std::error_code InMemoryFileSystem::setCurrentWorkingDirectory(const Twine &P) {
- SmallString<128> Path;
- P.toVector(Path);
-
- // Fix up relative paths. This just prepends the current working directory.
- std::error_code EC = makeAbsolute(Path);
- assert(!EC);
- (void)EC;
-
- if (useNormalizedPaths())
- llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
-
- if (!Path.empty())
- WorkingDirectory = Path.str();
- return {};
-}
-
-std::error_code
-InMemoryFileSystem::getRealPath(const Twine &Path,
- SmallVectorImpl<char> &Output) const {
- auto CWD = getCurrentWorkingDirectory();
- if (!CWD || CWD->empty())
- return errc::operation_not_permitted;
- Path.toVector(Output);
- if (auto EC = makeAbsolute(Output))
- return EC;
- llvm::sys::path::remove_dots(Output, /*remove_dot_dot=*/true);
- return {};
-}
-
-} // namespace vfs
-} // namespace clang
-
-//===-----------------------------------------------------------------------===/
-// RedirectingFileSystem implementation
-//===-----------------------------------------------------------------------===/
-
-namespace {
-
-enum EntryKind {
- EK_Directory,
- EK_File
-};
-
-/// A single file or directory in the VFS.
-class Entry {
- EntryKind Kind;
- std::string Name;
-
-public:
- Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
- virtual ~Entry() = default;
-
- StringRef getName() const { return Name; }
- EntryKind getKind() const { return Kind; }
-};
-
-class RedirectingDirectoryEntry : public Entry {
- std::vector<std::unique_ptr<Entry>> Contents;
- Status S;
-
-public:
- RedirectingDirectoryEntry(StringRef Name,
- std::vector<std::unique_ptr<Entry>> Contents,
- Status S)
- : Entry(EK_Directory, Name), Contents(std::move(Contents)),
- S(std::move(S)) {}
- RedirectingDirectoryEntry(StringRef Name, Status S)
- : Entry(EK_Directory, Name), S(std::move(S)) {}
-
- Status getStatus() { return S; }
-
- void addContent(std::unique_ptr<Entry> Content) {
- Contents.push_back(std::move(Content));
- }
-
- Entry *getLastContent() const { return Contents.back().get(); }
-
- using iterator = decltype(Contents)::iterator;
-
- iterator contents_begin() { return Contents.begin(); }
- iterator contents_end() { return Contents.end(); }
-
- static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
-};
-
-class RedirectingFileEntry : public Entry {
-public:
- enum NameKind {
- NK_NotSet,
- NK_External,
- NK_Virtual
- };
-
-private:
- std::string ExternalContentsPath;
- NameKind UseName;
-
-public:
- RedirectingFileEntry(StringRef Name, StringRef ExternalContentsPath,
- NameKind UseName)
- : Entry(EK_File, Name), ExternalContentsPath(ExternalContentsPath),
- UseName(UseName) {}
-
- StringRef getExternalContentsPath() const { return ExternalContentsPath; }
-
- /// whether to use the external path as the name for this file.
- bool useExternalName(bool GlobalUseExternalName) const {
- return UseName == NK_NotSet ? GlobalUseExternalName
- : (UseName == NK_External);
- }
-
- NameKind getUseName() const { return UseName; }
-
- static bool classof(const Entry *E) { return E->getKind() == EK_File; }
-};
-
-class RedirectingFileSystem;
-
-class VFSFromYamlDirIterImpl : public clang::vfs::detail::DirIterImpl {
- std::string Dir;
- RedirectingFileSystem &FS;
- RedirectingDirectoryEntry::iterator Current, End;
-
-public:
- VFSFromYamlDirIterImpl(const Twine &Path, RedirectingFileSystem &FS,
- RedirectingDirectoryEntry::iterator Begin,
- RedirectingDirectoryEntry::iterator End,
- std::error_code &EC);
-
- std::error_code increment() override;
-};
-
-/// A virtual file system parsed from a YAML file.
-///
-/// Currently, this class allows creating virtual directories and mapping
-/// virtual file paths to existing external files, available in \c ExternalFS.
-///
-/// The basic structure of the parsed file is:
-/// \verbatim
-/// {
-/// 'version': <version number>,
-/// <optional configuration>
-/// 'roots': [
-/// <directory entries>
-/// ]
-/// }
-/// \endverbatim
-///
-/// All configuration options are optional.
-/// 'case-sensitive': <boolean, default=true>
-/// 'use-external-names': <boolean, default=true>
-/// 'overlay-relative': <boolean, default=false>
-/// 'ignore-non-existent-contents': <boolean, default=true>
-///
-/// Virtual directories are represented as
-/// \verbatim
-/// {
-/// 'type': 'directory',
-/// 'name': <string>,
-/// 'contents': [ <file or directory entries> ]
-/// }
-/// \endverbatim
-///
-/// The default attributes for virtual directories are:
-/// \verbatim
-/// MTime = now() when created
-/// Perms = 0777
-/// User = Group = 0
-/// Size = 0
-/// UniqueID = unspecified unique value
-/// \endverbatim
-///
-/// Re-mapped files are represented as
-/// \verbatim
-/// {
-/// 'type': 'file',
-/// 'name': <string>,
-/// 'use-external-name': <boolean> # Optional
-/// 'external-contents': <path to external file>)
-/// }
-/// \endverbatim
-///
-/// and inherit their attributes from the external contents.
-///
-/// In both cases, the 'name' field may contain multiple path components (e.g.
-/// /path/to/file). However, any directory that contains more than one child
-/// must be uniquely represented by a directory entry.
-class RedirectingFileSystem : public vfs::FileSystem {
- friend class RedirectingFileSystemParser;
-
- /// The root(s) of the virtual file system.
- std::vector<std::unique_ptr<Entry>> Roots;
-
- /// The file system to use for external references.
- IntrusiveRefCntPtr<FileSystem> ExternalFS;
-
- /// If IsRelativeOverlay is set, this represents the directory
- /// path that should be prefixed to each 'external-contents' entry
- /// when reading from YAML files.
- std::string ExternalContentsPrefixDir;
-
- /// @name Configuration
- /// @{
-
- /// Whether to perform case-sensitive comparisons.
- ///
- /// Currently, case-insensitive matching only works correctly with ASCII.
- bool CaseSensitive = true;
-
- /// IsRelativeOverlay marks whether a IsExternalContentsPrefixDir path must
- /// be prefixed in every 'external-contents' when reading from YAML files.
- bool IsRelativeOverlay = false;
-
- /// Whether to use to use the value of 'external-contents' for the
- /// names of files. This global value is overridable on a per-file basis.
- bool UseExternalNames = true;
-
- /// Whether an invalid path obtained via 'external-contents' should
- /// cause iteration on the VFS to stop. If 'true', the VFS should ignore
- /// the entry and continue with the next. Allows YAML files to be shared
- /// across multiple compiler invocations regardless of prior existent
- /// paths in 'external-contents'. This global value is overridable on a
- /// per-file basis.
- bool IgnoreNonExistentContents = true;
- /// @}
-
- /// Virtual file paths and external files could be canonicalized without "..",
- /// "." and "./" in their paths. FIXME: some unittests currently fail on
- /// win32 when using remove_dots and remove_leading_dotslash on paths.
- bool UseCanonicalizedPaths =
-#ifdef _WIN32
- false;
-#else
- true;
-#endif
-
-private:
- RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
- : ExternalFS(std::move(ExternalFS)) {}
-
- /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly
- /// recursing into the contents of \p From if it is a directory.
- ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
- sys::path::const_iterator End, Entry *From);
-
- /// Get the status of a given an \c Entry.
- ErrorOr<Status> status(const Twine &Path, Entry *E);
-
-public:
- /// Looks up \p Path in \c Roots.
- ErrorOr<Entry *> lookupPath(const Twine &Path);
-
- /// Parses \p Buffer, which is expected to be in YAML format and
- /// returns a virtual file system representing its contents.
- static RedirectingFileSystem *
- create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
- void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);
-
- ErrorOr<Status> status(const Twine &Path) override;
- ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;
-
- llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
- return ExternalFS->getCurrentWorkingDirectory();
- }
-
- std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
- return ExternalFS->setCurrentWorkingDirectory(Path);
- }
-
- directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override{
- ErrorOr<Entry *> E = lookupPath(Dir);
- if (!E) {
- EC = E.getError();
- return {};
- }
- ErrorOr<Status> S = status(Dir, *E);
- if (!S) {
- EC = S.getError();
- return {};
- }
- if (!S->isDirectory()) {
- EC = std::error_code(static_cast<int>(errc::not_a_directory),
- std::system_category());
- return {};
- }
-
- auto *D = cast<RedirectingDirectoryEntry>(*E);
- return directory_iterator(std::make_shared<VFSFromYamlDirIterImpl>(Dir,
- *this, D->contents_begin(), D->contents_end(), EC));
- }
-
- void setExternalContentsPrefixDir(StringRef PrefixDir) {
- ExternalContentsPrefixDir = PrefixDir.str();
- }
-
- StringRef getExternalContentsPrefixDir() const {
- return ExternalContentsPrefixDir;
- }
-
- bool ignoreNonExistentContents() const {
- return IgnoreNonExistentContents;
- }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void dump() const {
- for (const auto &Root : Roots)
- dumpEntry(Root.get());
- }
-
-LLVM_DUMP_METHOD void dumpEntry(Entry *E, int NumSpaces = 0) const {
- StringRef Name = E->getName();
- for (int i = 0, e = NumSpaces; i < e; ++i)
- dbgs() << " ";
- dbgs() << "'" << Name.str().c_str() << "'" << "\n";
-
- if (E->getKind() == EK_Directory) {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(E);
- assert(DE && "Should be a directory");
-
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end()))
- dumpEntry(SubEntry.get(), NumSpaces+2);
- }
- }
-#endif
-};
-
-/// A helper class to hold the common YAML parsing state.
-class RedirectingFileSystemParser {
- yaml::Stream &Stream;
-
- void error(yaml::Node *N, const Twine &Msg) {
- Stream.printError(N, Msg);
- }
-
- // false on error
- bool parseScalarString(yaml::Node *N, StringRef &Result,
- SmallVectorImpl<char> &Storage) {
- const auto *S = dyn_cast<yaml::ScalarNode>(N);
-
- if (!S) {
- error(N, "expected string");
- return false;
- }
- Result = S->getValue(Storage);
- return true;
- }
-
- // false on error
- bool parseScalarBool(yaml::Node *N, bool &Result) {
- SmallString<5> Storage;
- StringRef Value;
- if (!parseScalarString(N, Value, Storage))
- return false;
-
- if (Value.equals_lower("true") || Value.equals_lower("on") ||
- Value.equals_lower("yes") || Value == "1") {
- Result = true;
- return true;
- } else if (Value.equals_lower("false") || Value.equals_lower("off") ||
- Value.equals_lower("no") || Value == "0") {
- Result = false;
- return true;
- }
-
- error(N, "expected boolean value");
- return false;
- }
-
- struct KeyStatus {
- bool Required;
- bool Seen = false;
-
- KeyStatus(bool Required = false) : Required(Required) {}
- };
-
- using KeyStatusPair = std::pair<StringRef, KeyStatus>;
-
- // false on error
- bool checkDuplicateOrUnknownKey(yaml::Node *KeyNode, StringRef Key,
- DenseMap<StringRef, KeyStatus> &Keys) {
- if (!Keys.count(Key)) {
- error(KeyNode, "unknown key");
- return false;
- }
- KeyStatus &S = Keys[Key];
- if (S.Seen) {
- error(KeyNode, Twine("duplicate key '") + Key + "'");
- return false;
- }
- S.Seen = true;
- return true;
- }
-
- // false on error
- bool checkMissingKeys(yaml::Node *Obj, DenseMap<StringRef, KeyStatus> &Keys) {
- for (const auto &I : Keys) {
- if (I.second.Required && !I.second.Seen) {
- error(Obj, Twine("missing key '") + I.first + "'");
- return false;
- }
- }
- return true;
- }
-
- Entry *lookupOrCreateEntry(RedirectingFileSystem *FS, StringRef Name,
- Entry *ParentEntry = nullptr) {
- if (!ParentEntry) { // Look for a existent root
- for (const auto &Root : FS->Roots) {
- if (Name.equals(Root->getName())) {
- ParentEntry = Root.get();
- return ParentEntry;
- }
- }
- } else { // Advance to the next component
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
- for (std::unique_ptr<Entry> &Content :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- auto *DirContent = dyn_cast<RedirectingDirectoryEntry>(Content.get());
- if (DirContent && Name.equals(Content->getName()))
- return DirContent;
- }
- }
-
- // ... or create a new one
- std::unique_ptr<Entry> E = llvm::make_unique<RedirectingDirectoryEntry>(
- Name,
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
-
- if (!ParentEntry) { // Add a new root to the overlay
- FS->Roots.push_back(std::move(E));
- ParentEntry = FS->Roots.back().get();
- return ParentEntry;
- }
-
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(ParentEntry);
- DE->addContent(std::move(E));
- return DE->getLastContent();
- }
-
- void uniqueOverlayTree(RedirectingFileSystem *FS, Entry *SrcE,
- Entry *NewParentE = nullptr) {
- StringRef Name = SrcE->getName();
- switch (SrcE->getKind()) {
- case EK_Directory: {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
- assert(DE && "Must be a directory");
- // Empty directories could be present in the YAML as a way to
- // describe a file for a current directory after some of its subdir
- // is parsed. This only leads to redundant walks, ignore it.
- if (!Name.empty())
- NewParentE = lookupOrCreateEntry(FS, Name, NewParentE);
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end()))
- uniqueOverlayTree(FS, SubEntry.get(), NewParentE);
- break;
- }
- case EK_File: {
- auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
- assert(FE && "Must be a file");
- assert(NewParentE && "Parent entry must exist");
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(NewParentE);
- DE->addContent(llvm::make_unique<RedirectingFileEntry>(
- Name, FE->getExternalContentsPath(), FE->getUseName()));
- break;
- }
- }
- }
-
- std::unique_ptr<Entry> parseEntry(yaml::Node *N, RedirectingFileSystem *FS) {
- auto *M = dyn_cast<yaml::MappingNode>(N);
- if (!M) {
- error(N, "expected mapping node for file or directory entry");
- return nullptr;
- }
-
- KeyStatusPair Fields[] = {
- KeyStatusPair("name", true),
- KeyStatusPair("type", true),
- KeyStatusPair("contents", false),
- KeyStatusPair("external-contents", false),
- KeyStatusPair("use-external-name", false),
- };
-
- DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
-
- bool HasContents = false; // external or otherwise
- std::vector<std::unique_ptr<Entry>> EntryArrayContents;
- std::string ExternalContentsPath;
- std::string Name;
- auto UseExternalName = RedirectingFileEntry::NK_NotSet;
- EntryKind Kind;
-
- for (auto &I : *M) {
- StringRef Key;
- // Reuse the buffer for key and value, since we don't look at key after
- // parsing value.
- SmallString<256> Buffer;
- if (!parseScalarString(I.getKey(), Key, Buffer))
- return nullptr;
-
- if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
- return nullptr;
-
- StringRef Value;
- if (Key == "name") {
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
-
- if (FS->UseCanonicalizedPaths) {
- SmallString<256> Path(Value);
- // Guarantee that old YAML files containing paths with ".." and "."
- // are properly canonicalized before being read into the VFS.
- Path = sys::path::remove_leading_dotslash(Path);
- sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- Name = Path.str();
- } else {
- Name = Value;
- }
- } else if (Key == "type") {
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
- if (Value == "file")
- Kind = EK_File;
- else if (Value == "directory")
- Kind = EK_Directory;
- else {
- error(I.getValue(), "unknown value for 'type'");
- return nullptr;
- }
- } else if (Key == "contents") {
- if (HasContents) {
- error(I.getKey(),
- "entry already has 'contents' or 'external-contents'");
- return nullptr;
- }
- HasContents = true;
- auto *Contents = dyn_cast<yaml::SequenceNode>(I.getValue());
- if (!Contents) {
- // FIXME: this is only for directories, what about files?
- error(I.getValue(), "expected array");
- return nullptr;
- }
-
- for (auto &I : *Contents) {
- if (std::unique_ptr<Entry> E = parseEntry(&I, FS))
- EntryArrayContents.push_back(std::move(E));
- else
- return nullptr;
- }
- } else if (Key == "external-contents") {
- if (HasContents) {
- error(I.getKey(),
- "entry already has 'contents' or 'external-contents'");
- return nullptr;
- }
- HasContents = true;
- if (!parseScalarString(I.getValue(), Value, Buffer))
- return nullptr;
-
- SmallString<256> FullPath;
- if (FS->IsRelativeOverlay) {
- FullPath = FS->getExternalContentsPrefixDir();
- assert(!FullPath.empty() &&
- "External contents prefix directory must exist");
- llvm::sys::path::append(FullPath, Value);
- } else {
- FullPath = Value;
- }
-
- if (FS->UseCanonicalizedPaths) {
- // Guarantee that old YAML files containing paths with ".." and "."
- // are properly canonicalized before being read into the VFS.
- FullPath = sys::path::remove_leading_dotslash(FullPath);
- sys::path::remove_dots(FullPath, /*remove_dot_dot=*/true);
- }
- ExternalContentsPath = FullPath.str();
- } else if (Key == "use-external-name") {
- bool Val;
- if (!parseScalarBool(I.getValue(), Val))
- return nullptr;
- UseExternalName = Val ? RedirectingFileEntry::NK_External
- : RedirectingFileEntry::NK_Virtual;
- } else {
- llvm_unreachable("key missing from Keys");
- }
- }
-
- if (Stream.failed())
- return nullptr;
-
- // check for missing keys
- if (!HasContents) {
- error(N, "missing key 'contents' or 'external-contents'");
- return nullptr;
- }
- if (!checkMissingKeys(N, Keys))
- return nullptr;
-
- // check invalid configuration
- if (Kind == EK_Directory &&
- UseExternalName != RedirectingFileEntry::NK_NotSet) {
- error(N, "'use-external-name' is not supported for directories");
- return nullptr;
- }
-
- // Remove trailing slash(es), being careful not to remove the root path
- StringRef Trimmed(Name);
- size_t RootPathLen = sys::path::root_path(Trimmed).size();
- while (Trimmed.size() > RootPathLen &&
- sys::path::is_separator(Trimmed.back()))
- Trimmed = Trimmed.slice(0, Trimmed.size()-1);
- // Get the last component
- StringRef LastComponent = sys::path::filename(Trimmed);
-
- std::unique_ptr<Entry> Result;
- switch (Kind) {
- case EK_File:
- Result = llvm::make_unique<RedirectingFileEntry>(
- LastComponent, std::move(ExternalContentsPath), UseExternalName);
- break;
- case EK_Directory:
- Result = llvm::make_unique<RedirectingDirectoryEntry>(
- LastComponent, std::move(EntryArrayContents),
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
- break;
- }
-
- StringRef Parent = sys::path::parent_path(Trimmed);
- if (Parent.empty())
- return Result;
-
- // if 'name' contains multiple components, create implicit directory entries
- for (sys::path::reverse_iterator I = sys::path::rbegin(Parent),
- E = sys::path::rend(Parent);
- I != E; ++I) {
- std::vector<std::unique_ptr<Entry>> Entries;
- Entries.push_back(std::move(Result));
- Result = llvm::make_unique<RedirectingDirectoryEntry>(
- *I, std::move(Entries),
- Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
- 0, 0, 0, file_type::directory_file, sys::fs::all_all));
- }
- return Result;
- }
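- For illustration, a hypothetical file entry whose 'name' is "/usr/include/stdio.h" comes back from parseEntry() as a chain of implicit directories wrapping the leaf:

    RedirectingDirectoryEntry "/"
      RedirectingDirectoryEntry "usr"
        RedirectingDirectoryEntry "include"
          RedirectingFileEntry "stdio.h" -> external contents path

  The loop above wraps the result in one directory per component of parent_path(), walking the components in reverse; uniqueOverlayTree() later merges such chains from every root into a single search tree.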
-
-public:
- RedirectingFileSystemParser(yaml::Stream &S) : Stream(S) {}
-
- // false on error
- bool parse(yaml::Node *Root, RedirectingFileSystem *FS) {
- auto *Top = dyn_cast<yaml::MappingNode>(Root);
- if (!Top) {
- error(Root, "expected mapping node");
- return false;
- }
-
- KeyStatusPair Fields[] = {
- KeyStatusPair("version", true),
- KeyStatusPair("case-sensitive", false),
- KeyStatusPair("use-external-names", false),
- KeyStatusPair("overlay-relative", false),
- KeyStatusPair("ignore-non-existent-contents", false),
- KeyStatusPair("roots", true),
- };
-
- DenseMap<StringRef, KeyStatus> Keys(std::begin(Fields), std::end(Fields));
- std::vector<std::unique_ptr<Entry>> RootEntries;
-
- // Parse configuration and 'roots'
- for (auto &I : *Top) {
- SmallString<10> KeyBuffer;
- StringRef Key;
- if (!parseScalarString(I.getKey(), Key, KeyBuffer))
- return false;
-
- if (!checkDuplicateOrUnknownKey(I.getKey(), Key, Keys))
- return false;
-
- if (Key == "roots") {
- auto *Roots = dyn_cast<yaml::SequenceNode>(I.getValue());
- if (!Roots) {
- error(I.getValue(), "expected array");
- return false;
- }
-
- for (auto &I : *Roots) {
- if (std::unique_ptr<Entry> E = parseEntry(&I, FS))
- RootEntries.push_back(std::move(E));
- else
- return false;
- }
- } else if (Key == "version") {
- StringRef VersionString;
- SmallString<4> Storage;
- if (!parseScalarString(I.getValue(), VersionString, Storage))
- return false;
- int Version;
- if (VersionString.getAsInteger<int>(10, Version)) {
- error(I.getValue(), "expected integer");
- return false;
- }
- if (Version < 0) {
- error(I.getValue(), "invalid version number");
- return false;
- }
- if (Version != 0) {
- error(I.getValue(), "version mismatch, expected 0");
- return false;
- }
- } else if (Key == "case-sensitive") {
- if (!parseScalarBool(I.getValue(), FS->CaseSensitive))
- return false;
- } else if (Key == "overlay-relative") {
- if (!parseScalarBool(I.getValue(), FS->IsRelativeOverlay))
- return false;
- } else if (Key == "use-external-names") {
- if (!parseScalarBool(I.getValue(), FS->UseExternalNames))
- return false;
- } else if (Key == "ignore-non-existent-contents") {
- if (!parseScalarBool(I.getValue(), FS->IgnoreNonExistentContents))
- return false;
- } else {
- llvm_unreachable("key missing from Keys");
- }
- }
-
- if (Stream.failed())
- return false;
-
- if (!checkMissingKeys(Top, Keys))
- return false;
-
- // Now that we successfully parsed the YAML file, canonicalize the internal
- // representation to a proper directory tree so that we can search faster
- // inside the VFS.
- for (auto &E : RootEntries)
- uniqueOverlayTree(FS, E.get());
-
- return true;
- }
-};
-
-} // namespace
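Taken together, the parser accepts overlay files of roughly the following shape. This is an illustrative sketch assembled from the key tables in parse() and parseEntry() above, not a verbatim test file:

    {
      'version': 0,
      'case-sensitive': 'false',
      'use-external-names': 'true',
      'overlay-relative': 'false',
      'ignore-non-existent-contents': 'true',
      'roots': [
        { 'name': '/usr/include', 'type': 'directory',
          'contents': [
            { 'name': 'stdio.h', 'type': 'file',
              'external-contents': '/overlay/stdio.h',
              'use-external-name': 'false' }
          ]
        }
      ]
    }

'version' must currently be 0, 'type' is either 'file' or 'directory', and every entry needs exactly one of 'contents' or 'external-contents'.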
-
-RedirectingFileSystem *
-RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath, void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- SourceMgr SM;
- yaml::Stream Stream(Buffer->getMemBufferRef(), SM);
-
- SM.setDiagHandler(DiagHandler, DiagContext);
- yaml::document_iterator DI = Stream.begin();
- yaml::Node *Root = DI->getRoot();
- if (DI == Stream.end() || !Root) {
- SM.PrintMessage(SMLoc(), SourceMgr::DK_Error, "expected root node");
- return nullptr;
- }
-
- RedirectingFileSystemParser P(Stream);
-
- std::unique_ptr<RedirectingFileSystem> FS(
- new RedirectingFileSystem(std::move(ExternalFS)));
-
- if (!YAMLFilePath.empty()) {
- // Use the YAML path from -ivfsoverlay to compute the dir to be prefixed
- // to each 'external-contents' path.
- //
- // Example:
- // -ivfsoverlay dummy.cache/vfs/vfs.yaml
- // yields:
- // FS->ExternalContentsPrefixDir => /<absolute_path_to>/dummy.cache/vfs
- //
- SmallString<256> OverlayAbsDir = sys::path::parent_path(YAMLFilePath);
- std::error_code EC = llvm::sys::fs::make_absolute(OverlayAbsDir);
- assert(!EC && "Overlay dir final path must be absolute");
- (void)EC;
- FS->setExternalContentsPrefixDir(OverlayAbsDir);
- }
-
- if (!P.parse(Root, FS.get()))
- return nullptr;
-
- return FS.release();
-}
-
-ErrorOr<Entry *> RedirectingFileSystem::lookupPath(const Twine &Path_) {
- SmallString<256> Path;
- Path_.toVector(Path);
-
- // Handle relative paths
- if (std::error_code EC = makeAbsolute(Path))
- return EC;
-
- // Canonicalize the path by removing ".", "..", "./", etc. components. This
- // is a VFS request; do not bother about symlinks in the path components,
- // but canonicalize in order to perform the correct entry search.
- if (UseCanonicalizedPaths) {
- Path = sys::path::remove_leading_dotslash(Path);
- sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- }
-
- if (Path.empty())
- return make_error_code(llvm::errc::invalid_argument);
-
- sys::path::const_iterator Start = sys::path::begin(Path);
- sys::path::const_iterator End = sys::path::end(Path);
- for (const auto &Root : Roots) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, Root.get());
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
-
-ErrorOr<Entry *>
-RedirectingFileSystem::lookupPath(sys::path::const_iterator Start,
- sys::path::const_iterator End, Entry *From) {
-#ifndef _WIN32
- assert(!isTraversalComponent(*Start) &&
- !isTraversalComponent(From->getName()) &&
- "Paths should not contain traversal components");
-#else
- // FIXME: this is here to support Windows; remove it once canonicalized
- // paths become the global default.
- if (Start->equals("."))
- ++Start;
-#endif
-
- StringRef FromName = From->getName();
-
- // Forward the search to the next component in case this is an empty one.
- if (!FromName.empty()) {
- if (CaseSensitive ? !Start->equals(FromName)
- : !Start->equals_lower(FromName))
- // failure to match
- return make_error_code(llvm::errc::no_such_file_or_directory);
-
- ++Start;
-
- if (Start == End) {
- // Match!
- return From;
- }
- }
-
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(From);
- if (!DE)
- return make_error_code(llvm::errc::not_a_directory);
-
- for (const std::unique_ptr<Entry> &DirEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- ErrorOr<Entry *> Result = lookupPath(Start, End, DirEntry.get());
- if (Result || Result.getError() != llvm::errc::no_such_file_or_directory)
- return Result;
- }
- return make_error_code(llvm::errc::no_such_file_or_directory);
-}
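Worked example (using the hypothetical overlay sketched earlier): looking up "/usr/include/stdio.h" first matches the root entry named "/" and consumes that component, then recurses into the root's children matching "usr" and "include" in turn, and finally returns the RedirectingFileEntry once "stdio.h" is consumed and Start == End. When 'case-sensitive' was 'false', each component comparison uses equals_lower() instead of an exact match.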
-
-static Status getRedirectedFileStatus(const Twine &Path, bool UseExternalNames,
- Status ExternalStatus) {
- Status S = ExternalStatus;
- if (!UseExternalNames)
- S = Status::copyWithNewName(S, Path.str());
- S.IsVFSMapped = true;
- return S;
-}
-
-ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path, Entry *E) {
- assert(E != nullptr);
- if (auto *F = dyn_cast<RedirectingFileEntry>(E)) {
- ErrorOr<Status> S = ExternalFS->status(F->getExternalContentsPath());
- assert(!S || S->getName() == F->getExternalContentsPath());
- if (S)
- return getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
- *S);
- return S;
- } else { // directory
- auto *DE = cast<RedirectingDirectoryEntry>(E);
- return Status::copyWithNewName(DE->getStatus(), Path.str());
- }
-}
-
-ErrorOr<Status> RedirectingFileSystem::status(const Twine &Path) {
- ErrorOr<Entry *> Result = lookupPath(Path);
- if (!Result)
- return Result.getError();
- return status(Path, *Result);
-}
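Net effect of getRedirectedFileStatus(): when a file entry resolves to 'use-external-names' false, a status query through the overlay reports the virtual path (with IsVFSMapped set); when it resolves to true, the external file's own name is reported instead. Directory entries always report the virtual path via copyWithNewName().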
-
-namespace {
-
-/// Provide a file wrapper with an overridden status.
-class FileWithFixedStatus : public File {
- std::unique_ptr<File> InnerFile;
- Status S;
-
-public:
- FileWithFixedStatus(std::unique_ptr<File> InnerFile, Status S)
- : InnerFile(std::move(InnerFile)), S(std::move(S)) {}
-
- ErrorOr<Status> status() override { return S; }
- ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBuffer(const Twine &Name, int64_t FileSize, bool RequiresNullTerminator,
- return InnerFile->getBuffer(Name, FileSize, RequiresNullTerminator,
- IsVolatile);
- }
-
- std::error_code close() override { return InnerFile->close(); }
-};
-
-} // namespace
-
-ErrorOr<std::unique_ptr<File>>
-RedirectingFileSystem::openFileForRead(const Twine &Path) {
- ErrorOr<Entry *> E = lookupPath(Path);
- if (!E)
- return E.getError();
-
- auto *F = dyn_cast<RedirectingFileEntry>(*E);
- if (!F) // FIXME: errc::not_a_file?
- return make_error_code(llvm::errc::invalid_argument);
-
- auto Result = ExternalFS->openFileForRead(F->getExternalContentsPath());
- if (!Result)
- return Result;
-
- auto ExternalStatus = (*Result)->status();
- if (!ExternalStatus)
- return ExternalStatus.getError();
-
- // FIXME: Update the status with the name and VFSMapped.
- Status S = getRedirectedFileStatus(Path, F->useExternalName(UseExternalNames),
- *ExternalStatus);
- return std::unique_ptr<File>(
- llvm::make_unique<FileWithFixedStatus>(std::move(*Result), S));
-}
-
-IntrusiveRefCntPtr<FileSystem>
-vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- return RedirectingFileSystem::create(std::move(Buffer), DiagHandler,
- YAMLFilePath, DiagContext,
- std::move(ExternalFS));
-}
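A rough usage sketch of the entry point above, with hypothetical file names and error handling elided; it uses only the clang::vfs API shown in this file plus llvm::MemoryBuffer:

    // Needs "clang/Basic/VirtualFileSystem.h" and "llvm/Support/MemoryBuffer.h".
    auto Buffer = llvm::MemoryBuffer::getFile("overlay.yaml");
    if (Buffer) {
      llvm::IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getVFSFromYAML(
          std::move(*Buffer), /*DiagHandler=*/nullptr, "overlay.yaml",
          /*DiagContext=*/nullptr, vfs::getRealFileSystem());
      if (FS)
        if (auto S = FS->status("/usr/include/stdio.h"))
          llvm::errs() << S->getName() << "\n"; // virtual or external name, per the overlay
    }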
-
-static void getVFSEntries(Entry *SrcE, SmallVectorImpl<StringRef> &Path,
- SmallVectorImpl<YAMLVFSEntry> &Entries) {
- auto Kind = SrcE->getKind();
- if (Kind == EK_Directory) {
- auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
- assert(DE && "Must be a directory");
- for (std::unique_ptr<Entry> &SubEntry :
- llvm::make_range(DE->contents_begin(), DE->contents_end())) {
- Path.push_back(SubEntry->getName());
- getVFSEntries(SubEntry.get(), Path, Entries);
- Path.pop_back();
- }
- return;
- }
-
- assert(Kind == EK_File && "Must be a EK_File");
- auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
- assert(FE && "Must be a file");
- SmallString<128> VPath;
- for (auto &Comp : Path)
- llvm::sys::path::append(VPath, Comp);
- Entries.push_back(YAMLVFSEntry(VPath.c_str(), FE->getExternalContentsPath()));
-}
-
-void vfs::collectVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
- SourceMgr::DiagHandlerTy DiagHandler,
- StringRef YAMLFilePath,
- SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
- void *DiagContext,
- IntrusiveRefCntPtr<FileSystem> ExternalFS) {
- RedirectingFileSystem *VFS = RedirectingFileSystem::create(
- std::move(Buffer), DiagHandler, YAMLFilePath, DiagContext,
- std::move(ExternalFS));
- ErrorOr<Entry *> RootE = VFS->lookupPath("/");
- if (!RootE)
- return;
- SmallVector<StringRef, 8> Components;
- Components.push_back("/");
- getVFSEntries(*RootE, Components, CollectedEntries);
-}
-
-UniqueID vfs::getNextVirtualUniqueID() {
- static std::atomic<unsigned> UID;
- unsigned ID = ++UID;
- // The following assumes that uint64_t max will never collide with a real
- // dev_t value from the OS.
- return UniqueID(std::numeric_limits<uint64_t>::max(), ID);
-}
-
-void YAMLVFSWriter::addFileMapping(StringRef VirtualPath, StringRef RealPath) {
- assert(sys::path::is_absolute(VirtualPath) && "virtual path not absolute");
- assert(sys::path::is_absolute(RealPath) && "real path not absolute");
- assert(!pathHasTraversal(VirtualPath) && "path traversal is not supported");
- Mappings.emplace_back(VirtualPath, RealPath);
-}
-
-namespace {
-
-class JSONWriter {
- llvm::raw_ostream &OS;
- SmallVector<StringRef, 16> DirStack;
-
- unsigned getDirIndent() { return 4 * DirStack.size(); }
- unsigned getFileIndent() { return 4 * (DirStack.size() + 1); }
- bool containedIn(StringRef Parent, StringRef Path);
- StringRef containedPart(StringRef Parent, StringRef Path);
- void startDirectory(StringRef Path);
- void endDirectory();
- void writeEntry(StringRef VPath, StringRef RPath);
-
-public:
- JSONWriter(llvm::raw_ostream &OS) : OS(OS) {}
-
- void write(ArrayRef<YAMLVFSEntry> Entries, Optional<bool> UseExternalNames,
- Optional<bool> IsCaseSensitive, Optional<bool> IsOverlayRelative,
- Optional<bool> IgnoreNonExistentContents, StringRef OverlayDir);
-};
-
-} // namespace
-
-bool JSONWriter::containedIn(StringRef Parent, StringRef Path) {
- using namespace llvm::sys;
-
- // Compare each path component.
- auto IParent = path::begin(Parent), EParent = path::end(Parent);
- for (auto IChild = path::begin(Path), EChild = path::end(Path);
- IParent != EParent && IChild != EChild; ++IParent, ++IChild) {
- if (*IParent != *IChild)
- return false;
- }
- // Have we exhausted the parent path?
- return IParent == EParent;
-}
-
-StringRef JSONWriter::containedPart(StringRef Parent, StringRef Path) {
- assert(!Parent.empty());
- assert(containedIn(Parent, Path));
- return Path.slice(Parent.size() + 1, StringRef::npos);
-}
-
-void JSONWriter::startDirectory(StringRef Path) {
- StringRef Name =
- DirStack.empty() ? Path : containedPart(DirStack.back(), Path);
- DirStack.push_back(Path);
- unsigned Indent = getDirIndent();
- OS.indent(Indent) << "{\n";
- OS.indent(Indent + 2) << "'type': 'directory',\n";
- OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(Name) << "\",\n";
- OS.indent(Indent + 2) << "'contents': [\n";
-}
-
-void JSONWriter::endDirectory() {
- unsigned Indent = getDirIndent();
- OS.indent(Indent + 2) << "]\n";
- OS.indent(Indent) << "}";
-
- DirStack.pop_back();
-}
-
-void JSONWriter::writeEntry(StringRef VPath, StringRef RPath) {
- unsigned Indent = getFileIndent();
- OS.indent(Indent) << "{\n";
- OS.indent(Indent + 2) << "'type': 'file',\n";
- OS.indent(Indent + 2) << "'name': \"" << llvm::yaml::escape(VPath) << "\",\n";
- OS.indent(Indent + 2) << "'external-contents': \""
- << llvm::yaml::escape(RPath) << "\"\n";
- OS.indent(Indent) << "}";
-}
-
-void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
- Optional<bool> UseExternalNames,
- Optional<bool> IsCaseSensitive,
- Optional<bool> IsOverlayRelative,
- Optional<bool> IgnoreNonExistentContents,
- StringRef OverlayDir) {
- using namespace llvm::sys;
-
- OS << "{\n"
- " 'version': 0,\n";
- if (IsCaseSensitive.hasValue())
- OS << " 'case-sensitive': '"
- << (IsCaseSensitive.getValue() ? "true" : "false") << "',\n";
- if (UseExternalNames.hasValue())
- OS << " 'use-external-names': '"
- << (UseExternalNames.getValue() ? "true" : "false") << "',\n";
- bool UseOverlayRelative = false;
- if (IsOverlayRelative.hasValue()) {
- UseOverlayRelative = IsOverlayRelative.getValue();
- OS << " 'overlay-relative': '"
- << (UseOverlayRelative ? "true" : "false") << "',\n";
- }
- if (IgnoreNonExistentContents.hasValue())
- OS << " 'ignore-non-existent-contents': '"
- << (IgnoreNonExistentContents.getValue() ? "true" : "false") << "',\n";
- OS << " 'roots': [\n";
-
- if (!Entries.empty()) {
- const YAMLVFSEntry &Entry = Entries.front();
- startDirectory(path::parent_path(Entry.VPath));
-
- StringRef RPath = Entry.RPath;
- if (UseOverlayRelative) {
- unsigned OverlayDirLen = OverlayDir.size();
- assert(RPath.substr(0, OverlayDirLen) == OverlayDir &&
- "Overlay dir must be contained in RPath");
- RPath = RPath.slice(OverlayDirLen, RPath.size());
- }
-
- writeEntry(path::filename(Entry.VPath), RPath);
-
- for (const auto &Entry : Entries.slice(1)) {
- StringRef Dir = path::parent_path(Entry.VPath);
- if (Dir == DirStack.back())
- OS << ",\n";
- else {
- while (!DirStack.empty() && !containedIn(DirStack.back(), Dir)) {
- OS << "\n";
- endDirectory();
- }
- OS << ",\n";
- startDirectory(Dir);
- }
- StringRef RPath = Entry.RPath;
- if (UseOverlayRelative) {
- unsigned OverlayDirLen = OverlayDir.size();
- assert(RPath.substr(0, OverlayDirLen) == OverlayDir &&
- "Overlay dir must be contained in RPath");
- RPath = RPath.slice(OverlayDirLen, RPath.size());
- }
- writeEntry(path::filename(Entry.VPath), RPath);
- }
-
- while (!DirStack.empty()) {
- OS << "\n";
- endDirectory();
- }
- OS << "\n";
- }
-
- OS << " ]\n"
- << "}\n";
-}
-
-void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
- llvm::sort(Mappings.begin(), Mappings.end(),
- [](const YAMLVFSEntry &LHS, const YAMLVFSEntry &RHS) {
- return LHS.VPath < RHS.VPath;
- });
-
- JSONWriter(OS).write(Mappings, UseExternalNames, IsCaseSensitive,
- IsOverlayRelative, IgnoreNonExistentContents,
- OverlayDir);
-}
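A minimal sketch of driving YAMLVFSWriter (the paths are hypothetical; both sides must be absolute and traversal-free per the asserts in addFileMapping()):

    YAMLVFSWriter Writer;
    Writer.addFileMapping("/virtual/include/stdio.h", "/real/cache/stdio.h");
    Writer.addFileMapping("/virtual/include/stdlib.h", "/real/cache/stdlib.h");
    Writer.write(llvm::outs());

write() sorts the mappings by virtual path and JSONWriter then groups them by parent directory, emitting a 'roots' tree of 'directory'/'file' entries in the same format the parser above consumes.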
-
-VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
- const Twine &_Path, RedirectingFileSystem &FS,
- RedirectingDirectoryEntry::iterator Begin,
- RedirectingDirectoryEntry::iterator End, std::error_code &EC)
- : Dir(_Path.str()), FS(FS), Current(Begin), End(End) {
- while (Current != End) {
- SmallString<128> PathStr(Dir);
- llvm::sys::path::append(PathStr, (*Current)->getName());
- llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
- if (S) {
- CurrentEntry = *S;
- return;
- }
- // Skip entries which do not map to reliable external contents.
- if (FS.ignoreNonExistentContents() &&
- S.getError() == llvm::errc::no_such_file_or_directory) {
- ++Current;
- continue;
- } else {
- EC = S.getError();
- break;
- }
- }
-}
-
-std::error_code VFSFromYamlDirIterImpl::increment() {
- assert(Current != End && "cannot iterate past end");
- while (++Current != End) {
- SmallString<128> PathStr(Dir);
- llvm::sys::path::append(PathStr, (*Current)->getName());
- llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
- if (!S) {
- // Skip entries which do not map to reliable external contents.
- if (FS.ignoreNonExistentContents() &&
- S.getError() == llvm::errc::no_such_file_or_directory) {
- continue;
- } else {
- return S.getError();
- }
- }
- CurrentEntry = *S;
- break;
- }
-
- if (Current == End)
- CurrentEntry = Status();
- return {};
-}
-
-vfs::recursive_directory_iterator::recursive_directory_iterator(FileSystem &FS_,
- const Twine &Path,
- std::error_code &EC)
- : FS(&FS_) {
- directory_iterator I = FS->dir_begin(Path, EC);
- if (I != directory_iterator()) {
- State = std::make_shared<IterState>();
- State->push(I);
- }
-}
-
-vfs::recursive_directory_iterator &
-recursive_directory_iterator::increment(std::error_code &EC) {
- assert(FS && State && !State->empty() && "incrementing past end");
- assert(State->top()->isStatusKnown() && "non-canonical end iterator");
- vfs::directory_iterator End;
- if (State->top()->isDirectory()) {
- vfs::directory_iterator I = FS->dir_begin(State->top()->getName(), EC);
- if (I != End) {
- State->push(I);
- return *this;
- }
- }
-
- while (!State->empty() && State->top().increment(EC) == End)
- State->pop();
-
- if (State->empty())
- State.reset(); // end iterator
-
- return *this;
-}
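A rough sketch of walking a virtual file system with the recursive iterator above, given an FS such as the one built by getVFSFromYAML earlier (the directory name is hypothetical; increment(EC) is used so traversal errors can be surfaced):

    std::error_code EC;
    for (vfs::recursive_directory_iterator I(*FS, "/virtual", EC), End;
         I != End && !EC; I.increment(EC))
      llvm::errs() << I->getName() << "\n"; // Status of each visited entry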
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 415bd9626220..b927acabac59 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -8,10 +8,10 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetOptions.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -37,6 +37,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
@@ -54,10 +55,13 @@
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
+#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
@@ -235,11 +239,12 @@ static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
+ bool UseOdrIndicator = CGOpts.SanitizeAddressUseOdrIndicator;
bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
UseAfterScope));
PM.add(createAddressSanitizerModulePass(/*CompileKernel*/ false, Recover,
- UseGlobalsGC));
+ UseGlobalsGC, UseOdrIndicator));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
@@ -247,7 +252,8 @@ static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
PM.add(createAddressSanitizerFunctionPass(
/*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false));
PM.add(createAddressSanitizerModulePass(
- /*CompileKernel*/ true, /*Recover*/ true));
+ /*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
+ /*UseOdrIndicator*/ false));
}
static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
@@ -265,14 +271,15 @@ static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
/*CompileKernel*/ true, /*Recover*/ true));
}
-static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
+static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM,
+ bool CompileKernel) {
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PM.add(createMemorySanitizerPass(TrackOrigins, Recover));
+ PM.add(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover, CompileKernel));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -287,9 +294,19 @@ static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
}
}
+static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ false);
+}
+
+static void addKernelMemorySanitizerPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ true);
+}
+
static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
- PM.add(createThreadSanitizerPass());
+ PM.add(createThreadSanitizerLegacyPassPass());
}
static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
@@ -368,6 +385,7 @@ static CodeGenOpt::Level getCGOptLevel(const CodeGenOptions &CodeGenOpts) {
static Optional<llvm::CodeModel::Model>
getCodeModel(const CodeGenOptions &CodeGenOpts) {
unsigned CodeModel = llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel)
+ .Case("tiny", llvm::CodeModel::Tiny)
.Case("small", llvm::CodeModel::Small)
.Case("kernel", llvm::CodeModel::Kernel)
.Case("medium", llvm::CodeModel::Medium)
@@ -416,7 +434,7 @@ static void initTargetOptions(llvm::TargetOptions &Options,
switch (LangOpts.getDefaultFPContractMode()) {
case LangOptions::FPC_Off:
// Preserve any contraction performed by the front-end. (Strict performs
- // splitting of the muladd instrinsic in the backend.)
+ // splitting of the muladd intrinsic in the backend.)
Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
break;
case LangOptions::FPC_On:
@@ -456,7 +474,7 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
- if (CodeGenOpts.EnableSplitDwarf)
+ if (CodeGenOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
@@ -491,6 +509,8 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts) {
Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData;
+ Options.Filter = CodeGenOpts.ProfileFilterFiles;
+ Options.Exclude = CodeGenOpts.ProfileExcludeFiles;
Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody;
return Options;
}
@@ -613,6 +633,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addMemorySanitizerPass);
}
+ if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addKernelMemorySanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addKernelMemorySanitizerPass);
+ }
+
if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addThreadSanitizerPass);
@@ -653,6 +680,11 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
InstrProfOptions Options;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
+
+ // TODO: Surface the option to emit atomic profile counter increments at
+ // the driver level.
+ Options.Atomic = LangOpts.Sanitize.has(SanitizerKind::Thread);
+
MPM.add(createInstrProfilingLegacyPass(Options));
}
if (CodeGenOpts.hasProfileIRInstr()) {
@@ -777,12 +809,14 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
case Backend_EmitBC:
- if (CodeGenOpts.PrepareForThinLTO) {
+ if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
if (!ThinLinkOS)
return;
}
+ TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
+ CodeGenOpts.EnableSplitLTOUnit);
PerModulePasses.add(createWriteThinLTOBitcodePass(
*OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
} else {
@@ -790,14 +824,18 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
// targets
bool EmitLTOSummary =
(CodeGenOpts.PrepareForLTO &&
+ !CodeGenOpts.DisableLLVMPasses &&
llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
llvm::Triple::Apple);
- if (EmitLTOSummary && !TheModule->getModuleFlag("ThinLTO"))
- TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
+ if (EmitLTOSummary) {
+ if (!TheModule->getModuleFlag("ThinLTO"))
+ TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
+ TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
+ CodeGenOpts.EnableSplitLTOUnit);
+ }
- PerModulePasses.add(
- createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
- EmitLTOSummary));
+ PerModulePasses.add(createBitcodeWriterPass(
+ *OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));
}
break;
@@ -807,7 +845,8 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
break;
default:
- if (!CodeGenOpts.SplitDwarfFile.empty()) {
+ if (!CodeGenOpts.SplitDwarfFile.empty() &&
+ (CodeGenOpts.getSplitDwarfMode() == CodeGenOptions::SplitFileFission)) {
DwoOS = openOutputFile(CodeGenOpts.SplitDwarfFile);
if (!DwoOS)
return;
@@ -905,18 +944,21 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
? DefaultProfileGenName
: CodeGenOpts.InstrProfileOutput,
- "", "", true, CodeGenOpts.DebugInfoForProfiling);
+ "", "", "", true,
+ CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.hasProfileIRUse())
// -fprofile-use.
- PGOOpt = PGOOptions("", CodeGenOpts.ProfileInstrumentUsePath, "", false,
+ PGOOpt = PGOOptions("", CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile, false,
CodeGenOpts.DebugInfoForProfiling);
else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
- PGOOpt = PGOOptions("", "", CodeGenOpts.SampleProfileFile, false,
+ PGOOpt = PGOOptions("", "", CodeGenOpts.SampleProfileFile,
+ CodeGenOpts.ProfileRemappingFile, false,
CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
- PGOOpt = PGOOptions("", "", "", false, true);
+ PGOOpt = PGOOptions("", "", "", "", false, true);
PassBuilder PB(TM.get(), PGOOpt);
@@ -961,9 +1003,11 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
MPM.addPass(createModuleToFunctionPassAdaptor(BoundsCheckingPass()));
- // Lastly, add a semantically necessary pass for LTO.
- if (IsLTO || IsThinLTO)
+ // Lastly, add semantically necessary passes for LTO.
+ if (IsLTO || IsThinLTO) {
+ MPM.addPass(CanonicalizeAliasesPass());
MPM.addPass(NameAnonGlobalPass());
+ }
} else {
// Map our optimization levels into one of the distinct levels used to
// configure the pipeline.
@@ -984,10 +1028,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
if (IsThinLTO) {
MPM = PB.buildThinLTOPreLinkDefaultPipeline(
Level, CodeGenOpts.DebugPassManager);
+ MPM.addPass(CanonicalizeAliasesPass());
MPM.addPass(NameAnonGlobalPass());
} else if (IsLTO) {
MPM = PB.buildLTOPreLinkDefaultPipeline(Level,
CodeGenOpts.DebugPassManager);
+ MPM.addPass(CanonicalizeAliasesPass());
MPM.addPass(NameAnonGlobalPass());
} else {
MPM = PB.buildPerModuleDefaultPipeline(Level,
@@ -1008,12 +1054,14 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
break;
case Backend_EmitBC:
- if (CodeGenOpts.PrepareForThinLTO) {
+ if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
if (!ThinLinkOS)
return;
}
+ TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
+ CodeGenOpts.EnableSplitLTOUnit);
MPM.addPass(ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &ThinLinkOS->os()
: nullptr));
} else {
@@ -1021,13 +1069,17 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// targets
bool EmitLTOSummary =
(CodeGenOpts.PrepareForLTO &&
+ !CodeGenOpts.DisableLLVMPasses &&
llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
llvm::Triple::Apple);
- if (EmitLTOSummary && !TheModule->getModuleFlag("ThinLTO"))
- TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
-
- MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
- EmitLTOSummary));
+ if (EmitLTOSummary) {
+ if (!TheModule->getModuleFlag("ThinLTO"))
+ TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
+ TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
+ CodeGenOpts.EnableSplitLTOUnit);
+ }
+ MPM.addPass(
+ BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));
}
break;
@@ -1104,6 +1156,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
const LangOptions &LOpts,
std::unique_ptr<raw_pwrite_stream> OS,
std::string SampleProfile,
+ std::string ProfileRemapping,
BackendAction Action) {
StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
@@ -1121,15 +1174,14 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
continue;
auto GUID = GlobalList.first;
- assert(GlobalList.second.SummaryList.size() == 1 &&
- "Expected individual combined index to have one summary per GUID");
- auto &Summary = GlobalList.second.SummaryList[0];
- // Skip the summaries for the importing module. These are included to
- // e.g. record required linkage changes.
- if (Summary->modulePath() == M->getModuleIdentifier())
- continue;
- // Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+ for (auto &Summary : GlobalList.second.SummaryList) {
+ // Skip the summaries for the importing module. These are included to
+ // e.g. record required linkage changes.
+ if (Summary->modulePath() == M->getModuleIdentifier())
+ continue;
+ // Add an entry to provoke importing by thinBackend.
+ ImportList[Summary->modulePath()].insert(GUID);
+ }
}
std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
@@ -1176,6 +1228,7 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
Conf.CGOptLevel = getCGOptLevel(CGOpts);
initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
+ Conf.ProfileRemapping = std::move(ProfileRemapping);
Conf.UseNewPM = CGOpts.ExperimentalNewPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
@@ -1242,7 +1295,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
if (!CombinedIndex->skipModuleByDistributedBackend()) {
runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
LOpts, std::move(OS), CGOpts.SampleProfileFile,
- Action);
+ CGOpts.ProfileRemappingFile, Action);
return;
}
// Distributed indexing detected that nothing from the module is needed
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index b34bcdc1fc38..24056a449def 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -18,7 +18,7 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@@ -765,11 +765,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
- bool UseLibcall = ((Ptr.getAlignment() % sizeChars) != 0 ||
- getContext().toBits(sizeChars) > MaxInlineWidthInBits);
- if (UseLibcall)
- CGM.getDiags().Report(E->getLocStart(), diag::warn_atomic_op_misaligned);
+ bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
+ bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
+ bool UseLibcall = Misaligned | Oversized;
+
+ if (UseLibcall) {
+ CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
+ << !Oversized;
+ }
llvm::Value *Order = EmitScalarExpr(E->getOrder());
llvm::Value *Scope =
@@ -923,6 +927,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
UseOptimizedLibcall = true;
break;
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ // Use the generic version if we don't know that the operand will be
+ // suitably aligned for the optimized version.
+ if (Misaligned)
+ break;
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
@@ -934,14 +947,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_load_n:
- case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_store_n:
- case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_exchange_n:
- case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
- case AtomicExpr::AO__atomic_compare_exchange:
// Only use optimized library calls for sizes for which they exist.
+ // FIXME: Size == 16 optimized library functions exist too.
if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
UseOptimizedLibcall = true;
break;
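For context on the optimized-vs-generic libcall split above: the sized __atomic_*_N entry points operate on the value directly and exist only for small power-of-two sizes (the FIXME notes 16 as well), while the generic forms take an explicit size and work through pointers. Roughly, following the standard libatomic ABI and shown here only for illustration:

    #include <cstddef>
    #include <cstdint>
    // Generic form: any size, memory-to-memory.
    extern "C" void __atomic_load(std::size_t Size, void *Src, void *Ret, int Order);
    // Sized "optimized" form: value in/out, only for 1/2/4/8-byte objects.
    extern "C" std::uint64_t __atomic_load_8(const void *Src, int Order);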
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 8269b5b229a2..fa3c3ee8610c 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CGBlocks.h"
+#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
@@ -25,6 +26,7 @@
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/ScopedPrinter.h"
#include <algorithm>
#include <cstdio>
@@ -34,8 +36,8 @@ using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
: Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- LocalAddress(Address::invalid()), StructureType(nullptr), Block(block),
- DominatingIP(nullptr) {
+ CapturesNonExternalType(false), LocalAddress(Address::invalid()),
+ StructureType(nullptr), Block(block), DominatingIP(nullptr) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -63,6 +65,110 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
}
+namespace {
+
+/// Represents a type of copy/destroy operation that should be performed for an
+/// entity that's captured by a block.
+enum class BlockCaptureEntityKind {
+ CXXRecord, // Copy or destroy
+ ARCWeak,
+ ARCStrong,
+ NonTrivialCStruct,
+ BlockObject, // Assign or release
+ None
+};
+
+/// Represents a captured entity that requires extra operations in order for
+/// this entity to be copied or destroyed correctly.
+struct BlockCaptureManagedEntity {
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+ const BlockDecl::Capture *CI;
+ const CGBlockInfo::Capture *Capture;
+
+ BlockCaptureManagedEntity(BlockCaptureEntityKind CopyType,
+ BlockCaptureEntityKind DisposeType,
+ BlockFieldFlags CopyFlags,
+ BlockFieldFlags DisposeFlags,
+ const BlockDecl::Capture &CI,
+ const CGBlockInfo::Capture &Capture)
+ : CopyKind(CopyType), DisposeKind(DisposeType), CopyFlags(CopyFlags),
+ DisposeFlags(DisposeFlags), CI(&CI), Capture(&Capture) {}
+
+ bool operator<(const BlockCaptureManagedEntity &Other) const {
+ return Capture->getOffset() < Other.Capture->getOffset();
+ }
+};
+
+enum class CaptureStrKind {
+ // String for the copy helper.
+ CopyHelper,
+ // String for the dispose helper.
+ DisposeHelper,
+ // Merge the strings for the copy helper and dispose helper.
+ Merged
+};
+
+} // end anonymous namespace
+
+static void findBlockCapturedManagedEntities(
+ const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
+ SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures);
+
+static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+ CaptureStrKind StrKind,
+ CharUnits BlockAlignment,
+ CodeGenModule &CGM);
+
+static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
+ CodeGenModule &CGM) {
+ std::string Name = "__block_descriptor_";
+ Name += llvm::to_string(BlockInfo.BlockSize.getQuantity()) + "_";
+
+ if (BlockInfo.needsCopyDisposeHelpers()) {
+ if (CGM.getLangOpts().Exceptions)
+ Name += "e";
+ if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Name += "a";
+ Name += llvm::to_string(BlockInfo.BlockAlign.getQuantity()) + "_";
+
+ SmallVector<BlockCaptureManagedEntity, 4> ManagedCaptures;
+ findBlockCapturedManagedEntities(BlockInfo, CGM.getContext().getLangOpts(),
+ ManagedCaptures);
+
+ for (const BlockCaptureManagedEntity &E : ManagedCaptures) {
+ Name += llvm::to_string(E.Capture->getOffset().getQuantity());
+
+ if (E.CopyKind == E.DisposeKind) {
+ // If CopyKind and DisposeKind are the same, merge the capture
+ // information.
+ assert(E.CopyKind != BlockCaptureEntityKind::None &&
+ "shouldn't see BlockCaptureManagedEntity that is None");
+ Name += getBlockCaptureStr(E, CaptureStrKind::Merged,
+ BlockInfo.BlockAlign, CGM);
+ } else {
+ // If CopyKind and DisposeKind are not the same, which can happen when
+ // either Kind is None or the captured object is a __strong block,
+ // concatenate the copy and dispose strings.
+ Name += getBlockCaptureStr(E, CaptureStrKind::CopyHelper,
+ BlockInfo.BlockAlign, CGM);
+ Name += getBlockCaptureStr(E, CaptureStrKind::DisposeHelper,
+ BlockInfo.BlockAlign, CGM);
+ }
+ }
+ Name += "_";
+ }
+
+ std::string TypeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(BlockInfo.getBlockExpr());
+ /// Replace occurrences of '@' with '\1'. '@' is reserved on ELF platforms as
+ /// a separator between symbol name and symbol version.
+ std::replace(TypeAtEncoding.begin(), TypeAtEncoding.end(), '@', '\1');
+ Name += "e" + llvm::to_string(TypeAtEncoding.size()) + "_" + TypeAtEncoding;
+ Name += "l" + CGM.getObjCRuntime().getRCBlockLayoutStr(CGM, BlockInfo);
+ return Name;
+}
+
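Read off the string-building above, the generated descriptor symbols have roughly this shape (a reading of the code, not a separately documented format):

    __block_descriptor_<block size>_[e][a]<block align>_<per-capture offset+capture strings>_e<len>_<@encode string, with '@' rewritten to '\1'>l<ObjC layout string>

where the optional 'e'/'a' letters and the per-capture section appear only when the block needs copy/dispose helpers.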
/// buildBlockDescriptor - Build the block descriptor meta-data for a block.
/// buildBlockDescriptor is accessed from 5th field of the Block_literal
/// meta-data and contains stationary information about the block literal.
@@ -72,7 +178,7 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
/// unsigned long reserved;
/// unsigned long size; // size of Block_literal metadata in bytes.
/// void *copy_func_helper_decl; // optional copy helper.
-/// void *destroy_func_decl; // optioanl destructor helper.
+/// void *destroy_func_decl; // optional destructor helper.
/// void *block_method_encoding_address; // @encode for block literal signature.
/// void *block_layout_info; // encoding of captured block variables.
/// };
@@ -91,6 +197,19 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
else
i8p = CGM.VoidPtrTy;
+ std::string descName;
+
+ // If an equivalent block descriptor global variable exists, return it.
+ if (C.getLangOpts().ObjC &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ descName = getBlockDescriptorName(blockInfo, CGM);
+ if (llvm::GlobalValue *desc = CGM.getModule().getNamedValue(descName))
+ return llvm::ConstantExpr::getBitCast(desc,
+ CGM.getBlockDescriptorType());
+ }
+
+ // If there isn't an equivalent block descriptor global variable, create a new
+ // one.
ConstantInitBuilder builder(CGM);
auto elements = builder.beginStruct();
@@ -104,12 +223,20 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
elements.addInt(ulong, blockInfo.BlockSize.getQuantity());
// Optional copy/dispose helpers.
+ bool hasInternalHelper = false;
if (blockInfo.needsCopyDisposeHelpers()) {
// copy_func_helper_decl
- elements.add(buildCopyHelper(CGM, blockInfo));
+ llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo);
+ elements.add(copyHelper);
// destroy_func_decl
- elements.add(buildDisposeHelper(CGM, blockInfo));
+ llvm::Constant *disposeHelper = buildDisposeHelper(CGM, blockInfo);
+ elements.add(disposeHelper);
+
+ if (cast<llvm::Function>(copyHelper->getOperand(0))->hasInternalLinkage() ||
+ cast<llvm::Function>(disposeHelper->getOperand(0))
+ ->hasInternalLinkage())
+ hasInternalHelper = true;
}
// Signature. Mandatory ObjC-style method descriptor @encode sequence.
@@ -119,7 +246,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
- if (C.getLangOpts().ObjC1) {
+ if (C.getLangOpts().ObjC) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
elements.add(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
@@ -132,12 +259,26 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
if (C.getLangOpts().OpenCL)
AddrSpace = C.getTargetAddressSpace(LangAS::opencl_constant);
+ llvm::GlobalValue::LinkageTypes linkage;
+ if (descName.empty()) {
+ linkage = llvm::GlobalValue::InternalLinkage;
+ descName = "__block_descriptor_tmp";
+ } else if (hasInternalHelper) {
+ // If either the copy helper or the dispose helper has internal linkage,
+ // the block descriptor must have internal linkage too.
+ linkage = llvm::GlobalValue::InternalLinkage;
+ } else {
+ linkage = llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
llvm::GlobalVariable *global =
- elements.finishAndCreateGlobal("__block_descriptor_tmp",
- CGM.getPointerAlign(),
- /*constant*/ true,
- llvm::GlobalValue::InternalLinkage,
- AddrSpace);
+ elements.finishAndCreateGlobal(descName, CGM.getPointerAlign(),
+ /*constant*/ true, linkage, AddrSpace);
+
+ if (linkage == llvm::GlobalValue::LinkOnceODRLinkage) {
+ global->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ global->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ }
return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
@@ -308,12 +449,25 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
assert(elementTypes.empty());
if (CGM.getLangOpts().OpenCL) {
- // The header is basically 'struct { int; int;
+ // The header is basically 'struct { int; int; generic void *;
// custom_fields; }'. Assert that struct is packed.
+ auto GenericAS =
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic);
+ auto GenPtrAlign =
+ CharUnits::fromQuantity(CGM.getTarget().getPointerAlign(GenericAS) / 8);
+ auto GenPtrSize =
+ CharUnits::fromQuantity(CGM.getTarget().getPointerWidth(GenericAS) / 8);
+ assert(CGM.getIntSize() <= GenPtrSize);
+ assert(CGM.getIntAlign() <= GenPtrAlign);
+ assert((2 * CGM.getIntSize()).isMultipleOf(GenPtrAlign));
elementTypes.push_back(CGM.IntTy); /* total size */
elementTypes.push_back(CGM.IntTy); /* align */
- unsigned Offset = 2 * CGM.getIntSize().getQuantity();
- unsigned BlockAlign = CGM.getIntAlign().getQuantity();
+ elementTypes.push_back(
+ CGM.getOpenCLRuntime()
+ .getGenericVoidPointerType()); /* invoke function */
+ unsigned Offset =
+ 2 * CGM.getIntSize().getQuantity() + GenPtrSize.getQuantity();
+ unsigned BlockAlign = GenPtrAlign.getQuantity();
if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldTypes()) /* custom fields */ {
@@ -355,7 +509,11 @@ static QualType getCaptureFieldType(const CodeGenFunction &CGF,
return CGF.BlockInfo->getCapture(VD).fieldType();
if (auto *FD = CGF.LambdaCaptureFields.lookup(VD))
return FD->getType();
- return VD->getType();
+ // If the captured variable is a non-escaping __block variable, the field
+ // type is the reference type. If the variable is a __block variable that
+ // already has a reference type, the field type is the variable's type.
+ return VD->isNonEscapingByref() ?
+ CGF.getContext().getLValueReferenceType(VD->getType()) : VD->getType();
}
/// Compute the layout of the given block. Attempts to lay the block
@@ -378,7 +536,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.CanBeGlobal = true;
return;
}
- else if (C.getLangOpts().ObjC1 &&
+ else if (C.getLangOpts().ObjC &&
CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
@@ -393,7 +551,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
if (block->capturesCXXThis()) {
assert(CGF && CGF->CurFuncDecl && isa<CXXMethodDecl>(CGF->CurFuncDecl) &&
"Can't capture 'this' outside a method");
- QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C);
+ QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType();
// Theoretically, this could be in a different address space, so
// don't assume standard pointer size/align.
@@ -411,7 +569,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
for (const auto &CI : block->captures()) {
const VarDecl *variable = CI.getVariable();
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
// We have to copy/dispose of the __block reference.
info.NeedsCopyDispose = true;
@@ -419,6 +577,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CharUnits align = CGM.getPointerAlign();
maxFieldAlign = std::max(maxFieldAlign, align);
+ // Since a __block variable cannot be captured by lambdas, its type and
+ // the capture field type should always match.
+ assert(getCaptureFieldType(*CGF, CI) == variable->getType() &&
+ "capture type differs from the variable type");
layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
Qualifiers::OCL_None, &CI,
CGM.VoidPtrTy, variable->getType()));
@@ -432,10 +594,11 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
continue;
}
+ QualType VT = getCaptureFieldType(*CGF, CI);
+
// If we have a lifetime qualifier, honor it for capture purposes.
// That includes *not* copying it if it's __unsafe_unretained.
- Qualifiers::ObjCLifetime lifetime =
- variable->getType().getObjCLifetime();
+ Qualifiers::ObjCLifetime lifetime = VT.getObjCLifetime();
if (lifetime) {
switch (lifetime) {
case Qualifiers::OCL_None: llvm_unreachable("impossible");
@@ -449,10 +612,10 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
// Block pointers require copy/dispose. So do Objective-C pointers.
- } else if (variable->getType()->isObjCRetainableType()) {
+ } else if (VT->isObjCRetainableType()) {
// But honor the inert __unsafe_unretained qualifier, which doesn't
// actually make it into the type system.
- if (variable->getType()->isObjCInertUnsafeUnretainedType()) {
+ if (VT->isObjCInertUnsafeUnretainedType()) {
lifetime = Qualifiers::OCL_ExplicitNone;
} else {
info.NeedsCopyDispose = true;
@@ -464,27 +627,27 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
} else if (CI.hasCopyExpr()) {
info.NeedsCopyDispose = true;
info.HasCXXObject = true;
+ if (!VT->getAsCXXRecordDecl()->isExternallyVisible())
+ info.CapturesNonExternalType = true;
// So do C structs that require non-trivial copy construction or
// destruction.
- } else if (variable->getType().isNonTrivialToPrimitiveCopy() ==
- QualType::PCK_Struct ||
- variable->getType().isDestructedType() ==
- QualType::DK_nontrivial_c_struct) {
+ } else if (VT.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct ||
+ VT.isDestructedType() == QualType::DK_nontrivial_c_struct) {
info.NeedsCopyDispose = true;
// And so do types with destructors.
} else if (CGM.getLangOpts().CPlusPlus) {
- if (const CXXRecordDecl *record =
- variable->getType()->getAsCXXRecordDecl()) {
+ if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl()) {
if (!record->hasTrivialDestructor()) {
info.HasCXXObject = true;
info.NeedsCopyDispose = true;
+ if (!record->isExternallyVisible())
+ info.CapturesNonExternalType = true;
}
}
}
- QualType VT = getCaptureFieldType(*CGF, CI);
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -699,10 +862,12 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
/// Enter a full-expression with a non-trivial number of objects to
/// clean up. This is in this file because, at the moment, the only
/// kind of cleanup object is a BlockDecl*.
-void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
- assert(E->getNumObjects() != 0);
- for (const ExprWithCleanups::CleanupObject &C : E->getObjects())
- enterBlockScope(*this, C);
+void CodeGenFunction::enterNonTrivialFullExpression(const FullExpr *E) {
+ if (const auto EWC = dyn_cast<ExprWithCleanups>(E)) {
+ assert(EWC->getNumObjects() != 0);
+ for (const ExprWithCleanups::CleanupObject &C : EWC->getObjects())
+ enterBlockScope(*this, C);
+ }
}
/// Find the layout for the given block in a linked list and remove it.
@@ -759,12 +924,20 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
bool IsOpenCL = CGM.getContext().getLangOpts().OpenCL;
+ auto GenVoidPtrTy =
+ IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy;
+ LangAS GenVoidPtrAddr = IsOpenCL ? LangAS::opencl_generic : LangAS::Default;
+ auto GenVoidPtrSize = CharUnits::fromQuantity(
+ CGM.getTarget().getPointerWidth(
+ CGM.getContext().getTargetAddressSpace(GenVoidPtrAddr)) /
+ 8);
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
CodeGenFunction BlockCGF{CGM, true};
BlockCGF.SanOpts = SanOpts;
auto *InvokeFn = BlockCGF.GenerateBlockFunction(
CurGD, blockInfo, LocalDeclMap, isLambdaConv, blockInfo.CanBeGlobal);
+ auto *blockFn = llvm::ConstantExpr::getPointerCast(InvokeFn, GenVoidPtrTy);
// If there is nothing to capture, we can emit this as a global block.
if (blockInfo.CanBeGlobal)
@@ -840,12 +1013,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()),
getIntSize(), "block.align");
}
- if (!IsOpenCL) {
- addHeaderField(llvm::ConstantExpr::getBitCast(InvokeFn, VoidPtrTy),
- getPointerSize(), "block.invoke");
+ addHeaderField(blockFn, GenVoidPtrSize, "block.invoke");
+ if (!IsOpenCL)
addHeaderField(descriptor, getPointerSize(), "block.descriptor");
- } else if (auto *Helper =
- CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ else if (auto *Helper =
+ CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
for (auto I : Helper->getCustomFieldValues(*this, blockInfo)) {
addHeaderField(
I.first,
@@ -889,7 +1061,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// The lambda capture in a lambda's conversion-to-block-pointer is
// special; we'll simply emit it directly.
src = Address::invalid();
- } else if (CI.isByRef()) {
+ } else if (CI.isEscapingByref()) {
if (BlockInfo && CI.isNested()) {
// We need to use the capture from the enclosing block.
const CGBlockInfo::Capture &enclosingCapture =
@@ -906,7 +1078,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
src = I->second;
}
} else {
- DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+ DeclRefExpr declRef(getContext(), const_cast<VarDecl *>(variable),
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
type.getNonReferenceType(), VK_LValue,
SourceLocation());
@@ -917,7 +1089,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// the block field. There's no need to chase the forwarding
// pointer at this point, since we're building something that will
// live a shorter life than the stack byref anyway.
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
// Get a void* that points to the byref struct.
llvm::Value *byrefPointer;
if (CI.isNested())
@@ -980,7 +1152,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// We use one of these or the other depending on whether the
// reference is nested.
- DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+ DeclRefExpr declRef(getContext(), const_cast<VarDecl *>(variable),
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
type, VK_LValue, SourceLocation());
@@ -1049,23 +1221,38 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
}
llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
- assert(!getLangOpts().OpenCL && "OpenCL does not need this");
-
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
- // struct __block_literal_generic {
- // void *__isa;
- // int __flags;
- // int __reserved;
- // void (*__invoke)(void *);
- // struct __block_descriptor *__descriptor;
- // };
- GenericBlockLiteralType =
- llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
- IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
+ if (getLangOpts().OpenCL) {
+ // struct __opencl_block_literal_generic {
+ // int __size;
+ // int __align;
+ // __generic void *__invoke;
+ // /* custom fields */
+ // };
+ SmallVector<llvm::Type *, 8> StructFields(
+ {IntTy, IntTy, getOpenCLRuntime().getGenericVoidPointerType()});
+ if (auto *Helper = getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
+ for (auto I : Helper->getCustomFieldTypes())
+ StructFields.push_back(I);
+ }
+ GenericBlockLiteralType = llvm::StructType::create(
+ StructFields, "struct.__opencl_block_literal_generic");
+ } else {
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy,
+ IntTy, IntTy, VoidPtrTy, BlockDescPtrTy);
+ }
return GenericBlockLiteralType;
}
@@ -1076,21 +1263,27 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
E->getCallee()->getType()->getAs<BlockPointerType>();
llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
- llvm::Value *FuncPtr;
- if (!CGM.getLangOpts().OpenCL) {
- // Get a pointer to the generic block literal.
- llvm::Type *BlockLiteralTy =
- llvm::PointerType::get(CGM.getGenericBlockLiteralType(), 0);
+ // Get a pointer to the generic block literal.
+ // For OpenCL we generate generic AS void ptr to be able to reuse the same
+ // block definition for blocks with captures generated as private AS local
+ // variables and without captures generated as global AS program scope
+ // variables.
+ unsigned AddrSpace = 0;
+ if (getLangOpts().OpenCL)
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- // Bitcast the callee to a block literal.
- BlockPtr =
- Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::get(CGM.getGenericBlockLiteralType(), AddrSpace);
- // Get the function pointer from the literal.
- FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
- }
+ // Bitcast the callee to a block literal.
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr =
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr,
+ CGM.getLangOpts().OpenCL ? 2 : 3);
// Add the block literal.
CallArgList Args;
@@ -1113,11 +1306,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- llvm::Value *Func;
- if (CGM.getLangOpts().OpenCL)
- Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
- else
- Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ llvm::Value *Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
const CGFunctionInfo &FnInfo =
@@ -1136,8 +1325,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
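
For orientation, here is a rough C++ model of what the call sequence above amounts to: cast the block pointer to the generic literal layout, load the invoke pointer (field index 3, or index 2 under the OpenCL layout), and call it with the literal itself as the first argument. The struct and function names below are illustrative only, not something the compiler emits.

    // Minimal sketch of the default (non-OpenCL) generic block literal.
    struct BlockLiteralGeneric {
      void *Isa;
      int Flags;
      int Reserved;
      void (*Invoke)(void *);   // field index 3 (index 2 in the OpenCL layout)
      void *Descriptor;
    };

    inline void callBlock(void *Blk) {
      auto *Literal = reinterpret_cast<BlockLiteralGeneric *>(Blk); // "block.literal" cast
      Literal->Invoke(Blk);     // the block literal is passed as the first argument
    }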
-Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
- bool isByRef) {
+Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
assert(BlockInfo && "evaluating block ref without block information?");
const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
@@ -1148,7 +1336,7 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
capture.getOffset(), "block.capture.addr");
- if (isByRef) {
+ if (variable->isEscapingByref()) {
// addr should be a void** right now. Load, then cast the result
// to byref*.
@@ -1162,6 +1350,10 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
variable->getName());
}
+ assert((!variable->isNonEscapingByref() ||
+ capture.fieldType()->isReferenceType()) &&
+ "the capture field of a non-escaping variable should have a "
+ "reference type");
if (capture.fieldType()->isReferenceType())
addr = EmitLoadOfReference(MakeAddrLValue(addr, capture.fieldType()));
@@ -1213,9 +1405,13 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
auto fields = builder.beginStruct();
bool IsOpenCL = CGM.getLangOpts().OpenCL;
+ bool IsWindows = CGM.getTarget().getTriple().isOSWindows();
if (!IsOpenCL) {
// isa
- fields.add(CGM.getNSConcreteGlobalBlock());
+ if (IsWindows)
+ fields.addNullPointer(CGM.Int8PtrPtrTy);
+ else
+ fields.add(CGM.getNSConcreteGlobalBlock());
// __flags
BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
@@ -1226,14 +1422,14 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Reserved
fields.addInt(CGM.IntTy, 0);
-
- // Function
- fields.add(blockFn);
} else {
fields.addInt(CGM.IntTy, blockInfo.BlockSize.getQuantity());
fields.addInt(CGM.IntTy, blockInfo.BlockAlign.getQuantity());
}
+ // Function
+ fields.add(blockFn);
+
if (!IsOpenCL) {
// Descriptor
fields.add(buildBlockDescriptor(CGM, blockInfo));
@@ -1250,7 +1446,27 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::Constant *literal = fields.finishAndCreateGlobal(
"__block_literal_global", blockInfo.BlockAlign,
- /*constant*/ true, llvm::GlobalVariable::InternalLinkage, AddrSpace);
+ /*constant*/ !IsWindows, llvm::GlobalVariable::InternalLinkage, AddrSpace);
+
+ // Windows does not allow globals to be initialised to point to globals in
+ // different DLLs. Any such variables must run code to initialise them.
+ if (IsWindows) {
+ auto *Init = llvm::Function::Create(llvm::FunctionType::get(CGM.VoidTy,
+ {}), llvm::GlobalValue::InternalLinkage, ".block_isa_init",
+ &CGM.getModule());
+ llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
+ Init));
+ b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(),
+ b.CreateStructGEP(literal, 0), CGM.getPointerAlign().getQuantity());
+ b.CreateRetVoid();
+ // We can't use the normal LLVM global initialisation array, because we
+ // need to specify that this runs early in library initialisation.
+ auto *InitVar = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*isConstant*/true, llvm::GlobalValue::InternalLinkage,
+ Init, ".block_isa_init_ptr");
+ InitVar->setSection(".CRT$XCLa");
+ CGM.addUsedGlobal(InitVar);
+ }
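
Conceptually, the Windows path above emits the global literal with a null isa slot and patches it from a tiny constructor registered in the early CRT init section. A hand-written approximation for exposition only; the identifiers, the pragma, and the struct shape are illustrative, and only the ".CRT$XCLa" section name comes from the code above:

    extern "C" void *_NSConcreteGlobalBlock[32];   // lives in the blocks runtime DLL

    struct BlockLiteral { void *Isa; int Flags; int Reserved; /* ... */ };
    static BlockLiteral BlockLiteralGlobal;        // emitted with a null isa slot

    static void BlockIsaInit() {
      // Patch in the cross-DLL pointer at runtime instead of at static-init time.
      BlockLiteralGlobal.Isa = _NSConcreteGlobalBlock;
    }

    // Register the initializer in the early CRT section, mirroring ".CRT$XCLa".
    #pragma section(".CRT$XCLa", read)
    __declspec(allocate(".CRT$XCLa"))
    void (*const BlockIsaInitPtr)() = BlockIsaInit;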
// Return a constant of the appropriately-casted type.
llvm::Type *RequiredType =
@@ -1284,7 +1500,7 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
}
}
- SourceLocation StartLoc = BlockInfo->getBlockExpr()->getBody()->getLocStart();
+ SourceLocation StartLoc = BlockInfo->getBlockExpr()->getBody()->getBeginLoc();
ApplyDebugLocation Scope(*this, StartLoc);
// Instead of messing around with LocalDeclMap, just set the value
@@ -1314,7 +1530,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
CurGD = GD;
- CurEHLocation = blockInfo.getBlockExpr()->getLocEnd();
+ CurEHLocation = blockInfo.getBlockExpr()->getEndLoc();
BlockInfo = &blockInfo;
@@ -1379,7 +1595,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// Begin generating the function.
StartFunction(blockDecl, fnType->getReturnType(), fn, fnInfo, args,
blockDecl->getLocation(),
- blockInfo.getBlockExpr()->getBody()->getLocStart());
+ blockInfo.getBlockExpr()->getBody()->getBeginLoc());
// Okay. Undo some of what StartFunction did.
@@ -1480,35 +1696,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
return fn;
}
-namespace {
-
-/// Represents a type of copy/destroy operation that should be performed for an
-/// entity that's captured by a block.
-enum class BlockCaptureEntityKind {
- CXXRecord, // Copy or destroy
- ARCWeak,
- ARCStrong,
- NonTrivialCStruct,
- BlockObject, // Assign or release
- None
-};
-
-/// Represents a captured entity that requires extra operations in order for
-/// this entity to be copied or destroyed correctly.
-struct BlockCaptureManagedEntity {
- BlockCaptureEntityKind Kind;
- BlockFieldFlags Flags;
- const BlockDecl::Capture &CI;
- const CGBlockInfo::Capture &Capture;
-
- BlockCaptureManagedEntity(BlockCaptureEntityKind Type, BlockFieldFlags Flags,
- const BlockDecl::Capture &CI,
- const CGBlockInfo::Capture &Capture)
- : Kind(Type), Flags(Flags), CI(CI), Capture(Capture) {}
-};
-
-} // end anonymous namespace
-
static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
const LangOptions &LangOpts) {
@@ -1518,7 +1705,7 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags());
}
BlockFieldFlags Flags;
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
Flags = BLOCK_FIELD_IS_BYREF;
if (T.isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
@@ -1566,23 +1753,32 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
llvm_unreachable("after exhaustive PrimitiveCopyKind switch");
}
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
/// Find the set of block captures that need to be explicitly copied or destroyed.
static void findBlockCapturedManagedEntities(
const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures,
- llvm::function_ref<std::pair<BlockCaptureEntityKind, BlockFieldFlags>(
- const BlockDecl::Capture &, QualType, const LangOptions &)>
- Predicate) {
+ SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures) {
for (const auto &CI : BlockInfo.getBlockDecl()->captures()) {
const VarDecl *Variable = CI.getVariable();
const CGBlockInfo::Capture &Capture = BlockInfo.getCapture(Variable);
if (Capture.isConstant())
continue;
- auto Info = Predicate(CI, Variable->getType(), LangOpts);
- if (Info.first != BlockCaptureEntityKind::None)
- ManagedCaptures.emplace_back(Info.first, Info.second, CI, Capture);
+ QualType VT = Capture.fieldType();
+ auto CopyInfo = computeCopyInfoForBlockCapture(CI, VT, LangOpts);
+ auto DisposeInfo = computeDestroyInfoForBlockCapture(CI, VT, LangOpts);
+ if (CopyInfo.first != BlockCaptureEntityKind::None ||
+ DisposeInfo.first != BlockCaptureEntityKind::None)
+ ManagedCaptures.emplace_back(CopyInfo.first, DisposeInfo.first,
+ CopyInfo.second, DisposeInfo.second, CI,
+ Capture);
}
+
+ // Sort the captures by offset.
+ llvm::sort(ManagedCaptures);
}
namespace {
@@ -1590,10 +1786,12 @@ namespace {
struct CallBlockRelease final : EHScopeStack::Cleanup {
Address Addr;
BlockFieldFlags FieldFlags;
- bool LoadBlockVarAddr;
+ bool LoadBlockVarAddr, CanThrow;
- CallBlockRelease(Address Addr, BlockFieldFlags Flags, bool LoadValue)
- : Addr(Addr), FieldFlags(Flags), LoadBlockVarAddr(LoadValue) {}
+ CallBlockRelease(Address Addr, BlockFieldFlags Flags, bool LoadValue,
+ bool CT)
+ : Addr(Addr), FieldFlags(Flags), LoadBlockVarAddr(LoadValue),
+ CanThrow(CT) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *BlockVarAddr;
@@ -1604,15 +1802,145 @@ struct CallBlockRelease final : EHScopeStack::Cleanup {
BlockVarAddr = Addr.getPointer();
}
- CGF.BuildBlockRelease(BlockVarAddr, FieldFlags);
+ CGF.BuildBlockRelease(BlockVarAddr, FieldFlags, CanThrow);
}
};
} // end anonymous namespace
+/// Check if \p T is a C++ class that has a destructor that can throw.
+bool CodeGenFunction::cxxDestructorCanThrow(QualType T) {
+ if (const auto *RD = T->getAsCXXRecordDecl())
+ if (const CXXDestructorDecl *DD = RD->getDestructor())
+ return DD->getType()->getAs<FunctionProtoType>()->canThrow();
+ return false;
+}
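
For instance, given the illustrative types below, cxxDestructorCanThrow would return true for the first and false for the second, since C++11 destructors are implicitly noexcept unless declared otherwise:

    struct MayThrowOnDestroy {
      ~MayThrowOnDestroy() noexcept(false);  // FunctionProtoType::canThrow() -> true
    };

    struct OrdinaryType {
      ~OrdinaryType();                       // implicitly noexcept -> false
    };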
+
+// Return a string that has the information about a capture.
+static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+ CaptureStrKind StrKind,
+ CharUnits BlockAlignment,
+ CodeGenModule &CGM) {
+ std::string Str;
+ ASTContext &Ctx = CGM.getContext();
+ const BlockDecl::Capture &CI = *E.CI;
+ QualType CaptureTy = CI.getVariable()->getType();
+
+ BlockCaptureEntityKind Kind;
+ BlockFieldFlags Flags;
+
+ // CaptureStrKind::Merged should be passed only when the operations and the
+ // flags are the same for copy and dispose.
+ assert((StrKind != CaptureStrKind::Merged ||
+ (E.CopyKind == E.DisposeKind && E.CopyFlags == E.DisposeFlags)) &&
+ "different operations and flags");
+
+ if (StrKind == CaptureStrKind::DisposeHelper) {
+ Kind = E.DisposeKind;
+ Flags = E.DisposeFlags;
+ } else {
+ Kind = E.CopyKind;
+ Flags = E.CopyFlags;
+ }
+
+ switch (Kind) {
+ case BlockCaptureEntityKind::CXXRecord: {
+ Str += "c";
+ SmallString<256> TyStr;
+ llvm::raw_svector_ostream Out(TyStr);
+ CGM.getCXXABI().getMangleContext().mangleTypeName(CaptureTy, Out);
+ Str += llvm::to_string(TyStr.size()) + TyStr.c_str();
+ break;
+ }
+ case BlockCaptureEntityKind::ARCWeak:
+ Str += "w";
+ break;
+ case BlockCaptureEntityKind::ARCStrong:
+ Str += "s";
+ break;
+ case BlockCaptureEntityKind::BlockObject: {
+ const VarDecl *Var = CI.getVariable();
+ unsigned F = Flags.getBitMask();
+ if (F & BLOCK_FIELD_IS_BYREF) {
+ Str += "r";
+ if (F & BLOCK_FIELD_IS_WEAK)
+ Str += "w";
+ else {
+ // If CaptureStrKind::Merged is passed, check both the copy expression
+ // and the destructor.
+ if (StrKind != CaptureStrKind::DisposeHelper) {
+ if (Ctx.getBlockVarCopyInit(Var).canThrow())
+ Str += "c";
+ }
+ if (StrKind != CaptureStrKind::CopyHelper) {
+ if (CodeGenFunction::cxxDestructorCanThrow(CaptureTy))
+ Str += "d";
+ }
+ }
+ } else {
+ assert((F & BLOCK_FIELD_IS_OBJECT) && "unexpected flag value");
+ if (F == BLOCK_FIELD_IS_BLOCK)
+ Str += "b";
+ else
+ Str += "o";
+ }
+ break;
+ }
+ case BlockCaptureEntityKind::NonTrivialCStruct: {
+ bool IsVolatile = CaptureTy.isVolatileQualified();
+ CharUnits Alignment =
+ BlockAlignment.alignmentAtOffset(E.Capture->getOffset());
+
+ Str += "n";
+ std::string FuncStr;
+ if (StrKind == CaptureStrKind::DisposeHelper)
+ FuncStr = CodeGenFunction::getNonTrivialDestructorStr(
+ CaptureTy, Alignment, IsVolatile, Ctx);
+ else
+ // If CaptureStrKind::Merged is passed, use the copy constructor string.
+ // It has all the information that the destructor string has.
+ FuncStr = CodeGenFunction::getNonTrivialCopyConstructorStr(
+ CaptureTy, Alignment, IsVolatile, Ctx);
+ // The underscore is necessary here because non-trivial copy constructor
+ // and destructor strings can start with a number.
+ Str += llvm::to_string(FuncStr.size()) + "_" + FuncStr;
+ break;
+ }
+ case BlockCaptureEntityKind::None:
+ break;
+ }
+
+ return Str;
+}
+
+static std::string getCopyDestroyHelperFuncName(
+ const SmallVectorImpl<BlockCaptureManagedEntity> &Captures,
+ CharUnits BlockAlignment, CaptureStrKind StrKind, CodeGenModule &CGM) {
+ assert((StrKind == CaptureStrKind::CopyHelper ||
+ StrKind == CaptureStrKind::DisposeHelper) &&
+ "unexpected CaptureStrKind");
+ std::string Name = StrKind == CaptureStrKind::CopyHelper
+ ? "__copy_helper_block_"
+ : "__destroy_helper_block_";
+ if (CGM.getLangOpts().Exceptions)
+ Name += "e";
+ if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Name += "a";
+ Name += llvm::to_string(BlockAlignment.getQuantity()) + "_";
+
+ for (const BlockCaptureManagedEntity &E : Captures) {
+ Name += llvm::to_string(E.Capture->getOffset().getQuantity());
+ Name += getBlockCaptureStr(E, StrKind, BlockAlignment, CGM);
+ }
+
+ return Name;
+}
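
As an indicative example of the naming scheme (offsets and alignment depend on the target layout, so the numbers here are only illustrative): a block built with exceptions enabled, 8-byte block alignment, and a single __strong object-pointer capture at offset 32 would get helpers named along the lines of

    __copy_helper_block_e8_32s
    __destroy_helper_block_e8_32s

so that identical helper bodies emitted in different translation units can collapse under the linkonce_odr linkage chosen below.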
+
static void pushCaptureCleanup(BlockCaptureEntityKind CaptureKind,
Address Field, QualType CaptureType,
- BlockFieldFlags Flags, bool EHOnly,
- CodeGenFunction &CGF) {
+ BlockFieldFlags Flags, bool ForCopyHelper,
+ VarDecl *Var, CodeGenFunction &CGF) {
+ bool EHOnly = ForCopyHelper;
+
switch (CaptureKind) {
case BlockCaptureEntityKind::CXXRecord:
case BlockCaptureEntityKind::ARCWeak:
@@ -1634,15 +1962,34 @@ static void pushCaptureCleanup(BlockCaptureEntityKind CaptureKind,
case BlockCaptureEntityKind::BlockObject: {
if (!EHOnly || CGF.getLangOpts().Exceptions) {
CleanupKind Kind = EHOnly ? EHCleanup : NormalAndEHCleanup;
- CGF.enterByrefCleanup(Kind, Field, Flags, /*LoadBlockVarAddr*/ true);
+ // Calls to _Block_object_dispose along the EH path in the copy helper
+ // function don't throw as newly-copied __block variables always have a
+ // reference count of 2.
+ bool CanThrow =
+ !ForCopyHelper && CGF.cxxDestructorCanThrow(CaptureType);
+ CGF.enterByrefCleanup(Kind, Field, Flags, /*LoadBlockVarAddr*/ true,
+ CanThrow);
}
break;
}
case BlockCaptureEntityKind::None:
- llvm_unreachable("unexpected BlockCaptureEntityKind");
+ break;
}
}
+static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FI,
+ CodeGenModule &CGM) {
+ if (CapturesNonExternalType) {
+ CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ } else {
+ Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Fn->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
+ }
+}
/// Generate the copy-helper function for a block closure object:
/// static void block_copy_helper(block_t *dst, block_t *src);
/// The runtime will have previously initialized 'dst' by doing a
@@ -1653,42 +2000,51 @@ static void pushCaptureCleanup(BlockCaptureEntityKind CaptureKind,
/// the contents of an individual __block variable to the heap.
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
+ findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures);
+ std::string FuncName =
+ getCopyDestroyHelperFuncName(CopiedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::CopyHelper, CGM);
+
+ if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
+ return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
+
ASTContext &C = getContext();
+ QualType ReturnTy = C.VoidTy;
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__copy_helper_block_", &CGM.getModule());
+ llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
+ FuncName, &CGM.getModule());
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__copy_helper_block_");
+ IdentifierInfo *II = &C.Idents.get(FuncName);
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(C.VoidPtrTy);
+ ArgTys.push_back(C.VoidPtrTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getLocStart()};
+ setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
+ CGM);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
+ ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
Address src = GetAddrOfLocalVar(&SrcDecl);
@@ -1699,88 +2055,81 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
- SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures,
- computeCopyInfoForBlockCapture);
-
for (const auto &CopiedCapture : CopiedCaptures) {
- const BlockDecl::Capture &CI = CopiedCapture.CI;
- const CGBlockInfo::Capture &capture = CopiedCapture.Capture;
+ const BlockDecl::Capture &CI = *CopiedCapture.CI;
+ const CGBlockInfo::Capture &capture = *CopiedCapture.Capture;
QualType captureType = CI.getVariable()->getType();
- BlockFieldFlags flags = CopiedCapture.Flags;
+ BlockFieldFlags flags = CopiedCapture.CopyFlags;
unsigned index = capture.getIndex();
Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
- // If there's an explicit copy expression, we do that.
- if (CI.getCopyExpr()) {
- assert(CopiedCapture.Kind == BlockCaptureEntityKind::CXXRecord);
+ switch (CopiedCapture.CopyKind) {
+ case BlockCaptureEntityKind::CXXRecord:
+ // If there's an explicit copy expression, we do that.
+ assert(CI.getCopyExpr() && "copy expression for variable is missing");
EmitSynthesizedCXXCopyCtor(dstField, srcField, CI.getCopyExpr());
- } else if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCWeak) {
+ break;
+ case BlockCaptureEntityKind::ARCWeak:
EmitARCCopyWeak(dstField, srcField);
- // If this is a C struct that requires non-trivial copy construction, emit a
- // call to its copy constructor.
- } else if (CopiedCapture.Kind ==
- BlockCaptureEntityKind::NonTrivialCStruct) {
+ break;
+ case BlockCaptureEntityKind::NonTrivialCStruct: {
+ // If this is a C struct that requires non-trivial copy construction,
+ // emit a call to its copy constructor.
QualType varType = CI.getVariable()->getType();
callCStructCopyConstructor(MakeAddrLValue(dstField, varType),
MakeAddrLValue(srcField, varType));
- } else {
+ break;
+ }
+ case BlockCaptureEntityKind::ARCStrong: {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCStrong) {
- // At -O0, store null into the destination field (so that the
- // storeStrong doesn't over-release) and then call storeStrong.
- // This is a workaround to not having an initStrong call.
- if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
- auto *ty = cast<llvm::PointerType>(srcValue->getType());
- llvm::Value *null = llvm::ConstantPointerNull::get(ty);
- Builder.CreateStore(null, dstField);
- EmitARCStoreStrongCall(dstField, srcValue, true);
-
- // With optimization enabled, take advantage of the fact that
- // the blocks runtime guarantees a memcpy of the block data, and
- // just emit a retain of the src field.
- } else {
- EmitARCRetainNonBlock(srcValue);
-
- // Unless EH cleanup is required, we don't need this anymore, so kill
- // it. It's not quite worth the annoyance to avoid creating it in the
- // first place.
- if (!needsEHCleanup(captureType.isDestructedType()))
- cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
- }
+ // At -O0, store null into the destination field (so that the
+ // storeStrong doesn't over-release) and then call storeStrong.
+ // This is a workaround to not having an initStrong call.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ auto *ty = cast<llvm::PointerType>(srcValue->getType());
+ llvm::Value *null = llvm::ConstantPointerNull::get(ty);
+ Builder.CreateStore(null, dstField);
+ EmitARCStoreStrongCall(dstField, srcValue, true);
+
+ // With optimization enabled, take advantage of the fact that
+ // the blocks runtime guarantees a memcpy of the block data, and
+ // just emit a retain of the src field.
} else {
- assert(CopiedCapture.Kind == BlockCaptureEntityKind::BlockObject);
- srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr =
- Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
- llvm::Value *args[] = {
- dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
- };
-
- const VarDecl *variable = CI.getVariable();
- bool copyCanThrow = false;
- if (CI.isByRef() && variable->getType()->getAsCXXRecordDecl()) {
- const Expr *copyExpr =
- CGM.getContext().getBlockVarCopyInits(variable);
- if (copyExpr) {
- copyCanThrow = true; // FIXME: reuse the noexcept logic
- }
- }
+ EmitARCRetainNonBlock(srcValue);
- if (copyCanThrow) {
- EmitRuntimeCallOrInvoke(CGM.getBlockObjectAssign(), args);
- } else {
- EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args);
- }
+ // Unless EH cleanup is required, we don't need this anymore, so kill
+ // it. It's not quite worth the annoyance to avoid creating it in the
+ // first place.
+ if (!needsEHCleanup(captureType.isDestructedType()))
+ cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
}
+ break;
+ }
+ case BlockCaptureEntityKind::BlockObject: {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr =
+ Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
+ llvm::Value *args[] = {
+ dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
+ };
+
+ if (CI.isByRef() && C.getBlockVarCopyInit(CI.getVariable()).canThrow())
+ EmitRuntimeCallOrInvoke(CGM.getBlockObjectAssign(), args);
+ else
+ EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args);
+ break;
+ }
+ case BlockCaptureEntityKind::None:
+ continue;
}
// Ensure that we destroy the copied object if an exception is thrown later
// in the helper function.
- pushCaptureCleanup(CopiedCapture.Kind, dstField, captureType, flags, /*EHOnly*/ true,
- *this);
+ pushCaptureCleanup(CopiedCapture.CopyKind, dstField, captureType, flags,
+ /*ForCopyHelper*/ true, CI.getVariable(), *this);
}
FinishFunction();
@@ -1800,7 +2149,7 @@ getBlockFieldFlagsForObjCObjectPointer(const BlockDecl::Capture &CI,
static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
const LangOptions &LangOpts) {
- if (CI.isByRef()) {
+ if (CI.isEscapingByref()) {
BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
if (T.isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
@@ -1844,37 +2193,50 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
/// variable.
llvm::Constant *
CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
+ findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures);
+ std::string FuncName =
+ getCopyDestroyHelperFuncName(DestroyedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::DisposeHelper, CGM);
+
+ if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
+ return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
+
ASTContext &C = getContext();
+ QualType ReturnTy = C.VoidTy;
+
FunctionArgList args;
- ImplicitParamDecl SrcDecl(getContext(), C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
// FIXME: We'd like to make these mergeable by content, with
// internal linkage.
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__destroy_helper_block_", &CGM.getModule());
+ llvm::Function::Create(LTy, llvm::GlobalValue::LinkOnceODRLinkage,
+ FuncName, &CGM.getModule());
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__destroy_helper_block_");
+ IdentifierInfo *II = &C.Idents.get(FuncName);
+
+ SmallVector<QualType, 1> ArgTys;
+ ArgTys.push_back(C.VoidPtrTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
- FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false, false);
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
+ CGM);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
+ markAsIgnoreThreadCheckingAtRuntime(Fn);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
- ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getLocStart()};
+ ApplyDebugLocation NL{*this, blockInfo.getBlockExpr()->getBeginLoc()};
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1884,20 +2246,17 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
CodeGenFunction::RunCleanupsScope cleanups(*this);
- SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures,
- computeDestroyInfoForBlockCapture);
-
for (const auto &DestroyedCapture : DestroyedCaptures) {
- const BlockDecl::Capture &CI = DestroyedCapture.CI;
- const CGBlockInfo::Capture &capture = DestroyedCapture.Capture;
- BlockFieldFlags flags = DestroyedCapture.Flags;
+ const BlockDecl::Capture &CI = *DestroyedCapture.CI;
+ const CGBlockInfo::Capture &capture = *DestroyedCapture.Capture;
+ BlockFieldFlags flags = DestroyedCapture.DisposeFlags;
Address srcField =
Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
- pushCaptureCleanup(DestroyedCapture.Kind, srcField,
- CI.getVariable()->getType(), flags, /*EHOnly*/ false, *this);
+ pushCaptureCleanup(DestroyedCapture.DisposeKind, srcField,
+ CI.getVariable()->getType(), flags,
+ /*ForCopyHelper*/ false, CI.getVariable(), *this);
}
cleanups.ForceCleanup();
@@ -1937,7 +2296,7 @@ public:
field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
llvm::Value *value = CGF.Builder.CreateLoad(field);
- CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
+ CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER, false);
}
void profileImpl(llvm::FoldingSetNodeID &id) const override {
@@ -2093,19 +2452,17 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
BlockByrefHelpers &generator) {
ASTContext &Context = CGF.getContext();
- QualType R = Context.VoidTy;
+ QualType ReturnTy = Context.VoidTy;
FunctionArgList args;
- ImplicitParamDecl Dst(CGF.getContext(), Context.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl Dst(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&Dst);
- ImplicitParamDecl Src(CGF.getContext(), Context.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamDecl Src(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
args.push_back(&Src);
const CGFunctionInfo &FI =
- CGF.CGM.getTypes().arrangeBuiltinFunctionDeclaration(R, args);
+ CGF.CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGF.CGM.getTypes().GetFunctionType(FI);
@@ -2118,16 +2475,18 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
IdentifierInfo *II
= &Context.Idents.get("__Block_byref_object_copy_");
- FunctionDecl *FD = FunctionDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, R, nullptr,
- SC_Static,
- false, false);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(Context.VoidPtrTy);
+ ArgTys.push_back(Context.VoidPtrTy);
+ QualType FunctionTy = Context.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ Context, Context.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- CGF.StartFunction(FD, R, Fn, FI, args);
+ CGF.StartFunction(FD, ReturnTy, Fn, FI, args);
if (generator.needsCopy()) {
llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
@@ -2192,12 +2551,13 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
IdentifierInfo *II
= &Context.Idents.get("__Block_byref_object_dispose_");
- FunctionDecl *FD = FunctionDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, R, nullptr,
- SC_Static,
- false, false);
+ SmallVector<QualType, 1> ArgTys;
+ ArgTys.push_back(Context.VoidPtrTy);
+ QualType FunctionTy = Context.getFunctionType(R, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ Context, Context.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
@@ -2254,6 +2614,9 @@ BlockByrefHelpers *
CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
+ assert(var.isEscapingByref() &&
+ "only escaping __block variables need byref helpers");
+
QualType type = var.getType();
auto &byrefInfo = getBlockByrefInfo(&var);
@@ -2264,7 +2627,8 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
byrefInfo.ByrefAlignment.alignmentAtOffset(byrefInfo.FieldOffset);
if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
- const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
+ const Expr *copyExpr =
+ CGM.getContext().getBlockVarCopyInit(&var).getCopyExpr();
if (!copyExpr && record->hasTrivialDestructor()) return nullptr;
return ::buildByrefHelpers(
@@ -2567,19 +2931,25 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
}
}
-void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags,
+ bool CanThrow) {
llvm::Value *F = CGM.getBlockObjectDispose();
llvm::Value *args[] = {
Builder.CreateBitCast(V, Int8PtrTy),
llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
- EmitNounwindRuntimeCall(F, args); // FIXME: throwing destructors?
+
+ if (CanThrow)
+ EmitRuntimeCallOrInvoke(F, args);
+ else
+ EmitNounwindRuntimeCall(F, args);
}
void CodeGenFunction::enterByrefCleanup(CleanupKind Kind, Address Addr,
BlockFieldFlags Flags,
- bool LoadBlockVarAddr) {
- EHStack.pushCleanup<CallBlockRelease>(Kind, Addr, Flags, LoadBlockVarAddr);
+ bool LoadBlockVarAddr, bool CanThrow) {
+ EHStack.pushCleanup<CallBlockRelease>(Kind, Addr, Flags, LoadBlockVarAddr,
+ CanThrow);
}
/// Adjust the declaration of something from the blocks API.
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 5abf82b3f6e1..3f9fc16d9b10 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -60,7 +60,7 @@ enum BlockLiteralFlags {
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_USE_STRET = (1 << 29),
BLOCK_HAS_SIGNATURE = (1 << 30),
- BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31)
+ BLOCK_HAS_EXTENDED_LAYOUT = (1u << 31)
};
class BlockFlags {
uint32_t flags;
@@ -132,6 +132,9 @@ public:
friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
return (l.flags & r.flags);
}
+ bool operator==(BlockFieldFlags Other) const {
+ return flags == Other.flags;
+ }
};
inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
return BlockFieldFlags(l) | BlockFieldFlags(r);
@@ -231,6 +234,11 @@ public:
/// and their layout meta-data has been generated.
bool HasCapturedVariableLayout : 1;
+ /// Indicates whether an object of a non-external C++ class is captured. This
+ /// bit is used to determine the linkage of the block copy/destroy helper
+ /// functions.
+ bool CapturesNonExternalType : 1;
+
/// The mapping of allocated indexes within the block.
llvm::DenseMap<const VarDecl*, Capture> Captures;
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index e99121c46d9b..a718f2f19aa6 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -21,10 +21,11 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
-#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
@@ -93,11 +94,11 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
-/// Utility to insert an atomic instruction based on Instrinsic::ID
+/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
-static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
- llvm::AtomicRMWInst::BinOp Kind,
- const CallExpr *E) {
+static Value *MakeBinaryAtomicValue(
+ CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -119,7 +120,7 @@ static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
+ Kind, Args[0], Args[1], Ordering);
return EmitFromInt(CGF, Result, T, ValueType);
}
@@ -151,7 +152,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
-/// Utility to insert an atomic instruction based Instrinsic::ID and
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
@@ -200,6 +201,9 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
+///
+/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
+/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
@@ -230,6 +234,72 @@ static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
ValueType);
}
+/// This function should be invoked to emit atomic cmpxchg for Microsoft's
+/// _InterlockedCompareExchange* intrinsics which have the following signature:
+/// T _InterlockedCompareExchange(T volatile *Destination,
+/// T Exchange,
+/// T Comparand);
+///
+/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
+/// cmpxchg *Destination, Comparand, Exchange.
+/// So we need to swap Comparand and Exchange when invoking
+/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
+/// function MakeAtomicCmpXchgValue since it expects the arguments to be
+/// already swapped.
+
+static
+Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(
+ E->getType(), E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(1)->getType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(2)->getType()));
+
+ auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
+ auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
+
+ // For Release ordering, the failure ordering should be Monotonic.
+ auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+ AtomicOrdering::Monotonic :
+ SuccessOrdering;
+
+ auto *Result = CGF.Builder.CreateAtomicCmpXchg(
+ Destination, Comparand, Exchange,
+ SuccessOrdering, FailureOrdering);
+ Result->setVolatile(true);
+ return CGF.Builder.CreateExtractValue(Result, 0);
+}
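
A non-atomic model of the intrinsic's observable behaviour, for exposition only; the real lowering is the single volatile cmpxchg built above, with Comparand in the "expected" position:

    static long InterlockedCompareExchangeModel(long volatile *Destination,
                                                long Exchange, long Comparand) {
      long Old = *Destination;
      if (Old == Comparand)
        *Destination = Exchange;   // the store happens only when the comparison succeeds
      return Old;                  // the previous value is always returned
    }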
+
+static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+
+ auto *IntTy = CGF.ConvertType(E->getType());
+ auto *Result = CGF.Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ Ordering);
+ return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
+}
+
+static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
+ AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+ assert(E->getArg(0)->getType()->isPointerType());
+
+ auto *IntTy = CGF.ConvertType(E->getType());
+ auto *Result = CGF.Builder.CreateAtomicRMW(
+ AtomicRMWInst::Sub,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ Ordering);
+ return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
+}
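
The extra CreateAdd/CreateSub of 1 is needed because atomicrmw yields the value held before the operation, while _InterlockedIncrement and _InterlockedDecrement are documented to return the resulting value. A non-atomic model, for illustration:

    static long InterlockedIncrementModel(long volatile *Addend) {
      long Old = *Addend;   // what `atomicrmw add` would return
      *Addend = Old + 1;
      return Old + 1;       // the intrinsic returns the incremented value
    }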
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
@@ -316,7 +386,7 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) {
- CGCallee callee = CGCallee::forDirect(calleeValue, FD);
+ CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
@@ -461,7 +531,7 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
assert(DIter != LocalDeclMap.end());
return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
- getContext().getSizeType(), E->getLocStart());
+ getContext().getSizeType(), E->getBeginLoc());
}
}
@@ -485,7 +555,7 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
}
namespace {
-/// A struct to generically desribe a bit test intrinsic.
+/// A struct to generically describe a bit test intrinsic.
struct BitTest {
enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
enum InterlockingKind : uint8_t {
@@ -711,8 +781,11 @@ static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
} else {
Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
Arg1Ty = CGF.Int8PtrTy;
- Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
- llvm::ConstantInt::get(CGF.Int32Ty, 0));
+ if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
+ Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::sponentry));
+ } else
+ Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
+ llvm::ConstantInt::get(CGF.Int32Ty, 0));
}
// Mark the call site and declaration with ReturnsTwice.
@@ -745,6 +818,30 @@ enum class CodeGenFunction::MSVCIntrin {
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
+ _InterlockedExchangeAdd_acq,
+ _InterlockedExchangeAdd_rel,
+ _InterlockedExchangeAdd_nf,
+ _InterlockedExchange_acq,
+ _InterlockedExchange_rel,
+ _InterlockedExchange_nf,
+ _InterlockedCompareExchange_acq,
+ _InterlockedCompareExchange_rel,
+ _InterlockedCompareExchange_nf,
+ _InterlockedOr_acq,
+ _InterlockedOr_rel,
+ _InterlockedOr_nf,
+ _InterlockedXor_acq,
+ _InterlockedXor_rel,
+ _InterlockedXor_nf,
+ _InterlockedAnd_acq,
+ _InterlockedAnd_rel,
+ _InterlockedAnd_nf,
+ _InterlockedIncrement_acq,
+ _InterlockedIncrement_rel,
+ _InterlockedIncrement_nf,
+ _InterlockedDecrement_acq,
+ _InterlockedDecrement_rel,
+ _InterlockedDecrement_nf,
__fastfail,
};
@@ -811,25 +908,74 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
-
- case MSVCIntrin::_InterlockedDecrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
- }
- case MSVCIntrin::_InterlockedIncrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
- }
+ case MSVCIntrin::_InterlockedExchangeAdd_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedExchangeAdd_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedExchangeAdd_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedExchange_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedExchange_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedExchange_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedCompareExchange_acq:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedCompareExchange_rel:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedCompareExchange_nf:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedOr_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedOr_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedOr_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedXor_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedXor_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedXor_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedAnd_acq:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedAnd_rel:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedAnd_nf:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedIncrement_acq:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedIncrement_rel:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedIncrement_nf:
+ return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedDecrement_acq:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedDecrement_rel:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedDecrement_nf:
+ return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
+
+ case MSVCIntrin::_InterlockedDecrement:
+ return EmitAtomicDecrementValue(*this, E);
+ case MSVCIntrin::_InterlockedIncrement:
+ return EmitAtomicIncrementValue(*this, E);
case MSVCIntrin::__fastfail: {
// Request immediate process termination from the kernel. The instruction
@@ -923,35 +1069,42 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
if (llvm::Function *F = CGM.getModule().getFunction(Name))
return F;
+ llvm::SmallVector<QualType, 4> ArgTys;
llvm::SmallVector<ImplicitParamDecl, 4> Params;
Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"),
Ctx.VoidPtrTy, ImplicitParamDecl::Other);
+ ArgTys.emplace_back(Ctx.VoidPtrTy);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
char Size = Layout.Items[I].getSizeByte();
if (!Size)
continue;
+ QualType ArgTy = getOSLogArgType(Ctx, Size);
Params.emplace_back(
Ctx, nullptr, SourceLocation(),
- &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)),
- getOSLogArgType(Ctx, Size), ImplicitParamDecl::Other);
+ &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
+ ImplicitParamDecl::Other);
+ ArgTys.emplace_back(ArgTy);
}
FunctionArgList Args;
for (auto &P : Params)
Args.push_back(&P);
+ QualType ReturnTy = Ctx.VoidTy;
+ QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
+
// The helper function has linkonce_odr linkage to enable the linker to merge
// identical functions. To ensure the merging always happens, 'noinline' is
// attached to the function when compiling with -Oz.
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = llvm::Function::Create(
FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
// Attach 'noinline' at -Oz.
@@ -962,9 +1115,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
IdentifierInfo *II = &Ctx.Idents.get(Name);
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
+ FunctionTy, nullptr, SC_PrivateExtern, false, false);
- StartFunction(FD, Ctx.VoidTy, Fn, FI, Args);
+ StartFunction(FD, ReturnTy, Fn, FI, Args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
@@ -1024,7 +1177,12 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
llvm::Value *ArgVal;
- if (const Expr *TheExpr = Item.getExpr()) {
+ if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
+ uint64_t Val = 0;
+ for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
+ Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
+ ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
+ } else if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
// Check if this is a retainable type.
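
The mask value a few lines above is assembled by packing the mask-type string into a 64-bit integer, one byte per character in little-endian order. A standalone equivalent of that loop, included only as an illustration:

    #include <cstdint>

    // e.g. packMask("abcd", 4) == 0x64636261
    static uint64_t packMask(const char *S, unsigned N) {
      uint64_t Val = 0;
      for (unsigned I = 0; I < N; ++I)
        Val |= static_cast<uint64_t>(static_cast<unsigned char>(S[I])) << (I * 8);
      return Val;
    }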
@@ -1077,7 +1235,7 @@ static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) {
return BuiltinID == Builtin::BI__builtin_mul_overflow &&
- Op1Info.Width == Op2Info.Width && Op1Info.Width >= ResultInfo.Width &&
+ std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
Op1Info.Signed != Op2Info.Signed;
}
@@ -1098,11 +1256,20 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
+ unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
+ unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
+
+ // One of the operands may be smaller than the other. If so, [s|z]ext it.
+ if (SignedOpWidth < UnsignedOpWidth)
+ Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
+ if (UnsignedOpWidth < SignedOpWidth)
+ Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
llvm::Type *OpTy = Signed->getType();
llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
llvm::Type *ResTy = ResultPtr.getElementType();
+ unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
// Take the absolute value of the signed operand.
llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
@@ -1120,8 +1287,8 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
if (ResultInfo.Signed) {
// Signed overflow occurs if the result is greater than INT_MAX or less
// than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
- auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width)
- .zextOrSelf(Op1Info.Width);
+ auto IntMax =
+ llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
@@ -1139,9 +1306,9 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
llvm::Value *Underflow = CGF.Builder.CreateAnd(
IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
- if (ResultInfo.Width < Op1Info.Width) {
+ if (ResultInfo.Width < OpWidth) {
auto IntMax =
- llvm::APInt::getMaxValue(ResultInfo.Width).zext(Op1Info.Width);
+ llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
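A quick usage sketch of what the relaxed width check above admits (illustrative only, not part of the patch): the signed and unsigned operands of __builtin_mul_overflow may now differ in width, and the narrower one is sign- or zero-extended up to the wider width before the checked multiply.

#include <cstdint>
#include <cstdio>

int main() {
  int64_t a = -3;   // signed 64-bit operand
  uint32_t b = 7;   // unsigned 32-bit operand, zero-extended to 64 bits
  int32_t r;
  // Mixed-sign, mixed-width multiply with overflow check.
  bool ovf = __builtin_mul_overflow(a, b, &r);
  printf("r=%d overflow=%d\n", r, (int)ovf);  // r=-21 overflow=0
  return 0;
}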
@@ -1252,9 +1419,61 @@ static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
return Res;
}
-RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E,
+static bool
+TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
+ llvm::SmallPtrSetImpl<const Decl *> &Seen) {
+ if (const auto *Arr = Ctx.getAsArrayType(Ty))
+ Ty = Ctx.getBaseElementType(Arr);
+
+ const auto *Record = Ty->getAsCXXRecordDecl();
+ if (!Record)
+ return false;
+
+ // We've already checked this type, or are in the process of checking it.
+ if (!Seen.insert(Record).second)
+ return false;
+
+ assert(Record->hasDefinition() &&
+ "Incomplete types should already be diagnosed");
+
+ if (Record->isDynamicClass())
+ return true;
+
+ for (FieldDecl *F : Record->fields()) {
+ if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
+ return true;
+ }
+ return false;
+}
+
+/// Determine if the specified type requires laundering by checking if it is a
+/// dynamic class type or contains a subobject which is a dynamic class type.
+static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
+ if (!CGM.getCodeGenOpts().StrictVTablePointers)
+ return false;
+ llvm::SmallPtrSet<const Decl *, 16> Seen;
+ return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
+}
+
+RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
+ llvm::Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
+
+ // The builtin's shift arg may have a different type than the source arg and
+ // result, but the LLVM intrinsic uses the same type for all values.
+ llvm::Type *Ty = Src->getType();
+ ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
+
+ // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
+ unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Value *F = CGM.getIntrinsic(IID, Ty);
+ return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
+}
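A minimal usage sketch (not part of the patch) of the rotate builtins routed through emitRotate; the __builtin_rotate* names are the clang builtins this lowering handles, and each call should become a single llvm.fshl/llvm.fshr with the value duplicated into the first two operands.

#include <cstdint>
#include <cstdio>

static uint8_t rl8(uint8_t x, uint8_t n) { return __builtin_rotateleft8(x, n); }
static uint64_t rr64(uint64_t x, uint64_t n) { return __builtin_rotateright64(x, n); }

int main() {
  printf("%u\n", (unsigned)rl8(0x81, 1));                   // 3
  printf("%llu\n", (unsigned long long)rr64(1, 1));         // 9223372036854775808 (0x8000000000000000)
  return 0;
}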
+
+RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+ const CallExpr *E,
ReturnValueSlot ReturnValue) {
+ const FunctionDecl *FD = GD.getDecl()->getAsFunction();
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
@@ -1537,6 +1756,26 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(ComplexVal.second);
}
+ case Builtin::BI__builtin_clrsb:
+ case Builtin::BI__builtin_clrsbl:
+ case Builtin::BI__builtin_clrsbll: {
+ // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
+ Value *Inverse = Builder.CreateNot(ArgValue, "not");
+ Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
+ Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
+ Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
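A small worked example of the formula in the case above; the values are what the lowering should produce for a 32-bit int, assuming the usual two's-complement representation.

#include <cstdio>

int main() {
  // clrsb(x) counts redundant sign bits: clz(x < 0 ? ~x : x) - 1.
  printf("%d\n", __builtin_clrsb(0));    // 31: ctlz(0, is_zero_undef=false) = 32, minus 1
  printf("%d\n", __builtin_clrsb(-1));   // 31: ~(-1) == 0, same as above
  printf("%d\n", __builtin_clrsb(1));    // 30: ctlz(1) = 31, minus 1
  return 0;
}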
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
@@ -1609,6 +1848,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__lzcnt16:
+ case Builtin::BI__lzcnt:
+ case Builtin::BI__lzcnt64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
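A usage sketch for the new MSVC __lzcnt mapping (assumes -fms-extensions or an MSVC-compatible target so these intrinsic names are visible as builtins): because the ctlz call passes is_zero_undef=false, a zero input has a defined result equal to the bit width.

#include <cstdio>

int main() {
  printf("%u\n", __lzcnt(1u));                               // 31
  printf("%u\n", __lzcnt(0u));                               // 32, defined rather than undefined
  printf("%u\n", (unsigned)__lzcnt16((unsigned short)1));    // 15
  return 0;
}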
case Builtin::BI__popcnt16:
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64:
@@ -1627,46 +1881,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
- case Builtin::BI_rotr8:
- case Builtin::BI_rotr16:
- case Builtin::BI_rotr:
- case Builtin::BI_lrotr:
- case Builtin::BI_rotr64: {
- Value *Val = EmitScalarExpr(E->getArg(0));
- Value *Shift = EmitScalarExpr(E->getArg(1));
-
- llvm::Type *ArgType = Val->getType();
- Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = ArgType->getIntegerBitWidth();
- Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
-
- Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask);
- Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
- Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
- Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
- Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
- return RValue::get(Result);
- }
- case Builtin::BI_rotl8:
- case Builtin::BI_rotl16:
- case Builtin::BI_rotl:
- case Builtin::BI_lrotl:
- case Builtin::BI_rotl64: {
- Value *Val = EmitScalarExpr(E->getArg(0));
- Value *Shift = EmitScalarExpr(E->getArg(1));
-
- llvm::Type *ArgType = Val->getType();
- Shift = Builder.CreateIntCast(Shift, ArgType, false);
- unsigned ArgWidth = ArgType->getIntegerBitWidth();
- Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
-
- Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask);
- Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
- Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
- Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
- Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
- return RValue::get(Result);
- }
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
@@ -1690,15 +1904,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_assume_aligned: {
- Value *PtrValue = EmitScalarExpr(E->getArg(0));
+ const Expr *Ptr = E->getArg(0);
+ Value *PtrValue = EmitScalarExpr(Ptr);
Value *OffsetValue =
(E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
- unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
+ unsigned Alignment = (unsigned)AlignmentCI->getZExtValue();
- EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
+ EmitAlignmentAssumption(PtrValue, Ptr, /*The expr loc is sufficient.*/ SourceLocation(),
+ Alignment, OffsetValue);
return RValue::get(PtrValue);
}
case Builtin::BI__assume:
@@ -1721,6 +1937,48 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_bitreverse64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
}
+ case Builtin::BI__builtin_rotateleft8:
+ case Builtin::BI__builtin_rotateleft16:
+ case Builtin::BI__builtin_rotateleft32:
+ case Builtin::BI__builtin_rotateleft64:
+ case Builtin::BI_rotl8: // Microsoft variants of rotate left
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64:
+ return emitRotate(E, false);
+
+ case Builtin::BI__builtin_rotateright8:
+ case Builtin::BI__builtin_rotateright16:
+ case Builtin::BI__builtin_rotateright32:
+ case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI_rotr8: // Microsoft variants of rotate right
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64:
+ return emitRotate(E, true);
+
+ case Builtin::BI__builtin_constant_p: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ // At -O0, we don't perform inlining, so we don't need to delay the
+ // processing.
+ return RValue::get(ConstantInt::get(ResultType, 0));
+
+ const Expr *Arg = E->getArg(0);
+ QualType ArgType = Arg->getType();
+ if (!hasScalarEvaluationKind(ArgType) || ArgType->isFunctionType())
+ // We can only reason about scalar types.
+ return RValue::get(ConstantInt::get(ResultType, 0));
+
+ Value *ArgValue = EmitScalarExpr(Arg);
+ Value *F = CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
+ return RValue::get(Result);
+ }
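A hedged sketch of the behaviour this case gives: literals still fold in the front end, while a non-constant argument folds to 0 at -O0 (no inlining, so there is nothing to wait for) but is emitted as llvm.is.constant otherwise, which can typically still resolve to true after inlining at -O2.

#include <cstdio>

static int answer(int x) { return __builtin_constant_p(x) ? x * 2 : -1; }

int main() {
  printf("%d\n", __builtin_constant_p(42));  // 1: folded by the front end
  printf("%d\n", answer(21));                // -1 at -O0; typically 42 at -O2 after inlining
  return 0;
}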
case Builtin::BI__builtin_object_size: {
unsigned Type =
E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
@@ -1985,10 +2243,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2009,10 +2269,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2047,10 +2309,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- llvm::APSInt Size, DstSize;
- if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
- !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ Expr::EvalResult SizeResult, DstSizeResult;
+ if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
break;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -2258,6 +2522,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(nullptr);
}
+ case Builtin::BI__builtin_launder: {
+ const Expr *Arg = E->getArg(0);
+ QualType ArgTy = Arg->getType()->getPointeeType();
+ Value *Ptr = EmitScalarExpr(Arg);
+ if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
+ Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
+
+ return RValue::get(Ptr);
+ }
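A sketch of when TypeRequiresBuiltinLaunder fires, using hypothetical types and assuming -fstrict-vtable-pointers (the check above is gated on that option): laundering a pointer to a dynamic class emits llvm.launder.invariant.group, while a type with no vptr anywhere passes through unchanged.

struct Poly  { virtual ~Poly() {} int v = 1; };  // dynamic class: needs laundering
struct Plain { int v = 1; };                     // no vptr in any subobject: no-op

int use(Poly *p, Plain *q) {
  Poly  *lp = __builtin_launder(p);  // launder.invariant.group under the option
  Plain *lq = __builtin_launder(q);  // pointer returned untouched
  return lp->v + lq->v;
}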
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
@@ -2952,7 +3225,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedExchangePointer:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
- case Builtin::BI_InterlockedCompareExchangePointer: {
+ case Builtin::BI_InterlockedCompareExchangePointer:
+ case Builtin::BI_InterlockedCompareExchangePointer_nf: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
IntegerType::get(getLLVMContext(),
@@ -2969,10 +3243,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *Comparand =
Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
- auto Result =
- Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
+ auto Ordering =
+ BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
+ AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
+
+ auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ Ordering, Ordering);
Result->setVolatile(true);
return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
@@ -2982,16 +3258,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
case Builtin::BI_InterlockedCompareExchange:
- case Builtin::BI_InterlockedCompareExchange64: {
- AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(1)),
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
- CXI->setVolatile(true);
- return RValue::get(Builder.CreateExtractValue(CXI, 0));
- }
+ case Builtin::BI_InterlockedCompareExchange64:
+ return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
case Builtin::BI_InterlockedIncrement16:
case Builtin::BI_InterlockedIncrement:
return RValue::get(
@@ -3337,24 +3605,31 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
- auto CreateArrayForSizeVar = [=](unsigned First) {
- auto *AT = llvm::ArrayType::get(SizeTy, NumArgs - First);
- auto *Arr = Builder.CreateAlloca(AT);
- llvm::Value *Ptr;
+ auto CreateArrayForSizeVar = [=](unsigned First)
+ -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
+ llvm::APInt ArraySize(32, NumArgs - First);
+ QualType SizeArrayTy = getContext().getConstantArrayType(
+ getContext().getSizeType(), ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
+ llvm::Value *TmpPtr = Tmp.getPointer();
+ llvm::Value *TmpSize = EmitLifetimeStart(
+ CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
+ llvm::Value *ElemPtr;
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
for (unsigned I = First; I < NumArgs; ++I) {
auto *Index = llvm::ConstantInt::get(IntTy, I - First);
- auto *GEP = Builder.CreateGEP(Arr, {Zero, Index});
+ auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
if (I == First)
- Ptr = GEP;
+ ElemPtr = GEP;
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
}
- return Ptr;
+ return std::tie(ElemPtr, TmpSize, TmpPtr);
};
// Could have events and/or varargs.
@@ -3366,24 +3641,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- auto *PtrToSizeArray = CreateArrayForSizeVar(4);
+ llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
+ std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
std::vector<llvm::Value *> Args = {
Queue, Flags, Range,
Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
- PtrToSizeArray};
+ ElemPtr};
std::vector<llvm::Type *> ArgTys = {
- QueueTy, IntTy, RangeTy,
- GenericVoidPtrTy, GenericVoidPtrTy, IntTy,
- PtrToSizeArray->getType()};
+ QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
+ GenericVoidPtrTy, IntTy, ElemPtr->getType()};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ auto Call =
+ RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ llvm::ArrayRef<llvm::Value *>(Args)));
+ if (TmpSize)
+ EmitLifetimeEnd(TmpSize, TmpPtr);
+ return Call;
}
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
@@ -3400,7 +3678,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
// Convert to generic address space.
EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
- ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
+ ClkEvent = ClkEvent->getType()->isIntegerTy()
+ ? Builder.CreateBitOrPointerCast(ClkEvent, EventPtrTy)
+ : Builder.CreatePointerCast(ClkEvent, EventPtrTy);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
@@ -3430,15 +3710,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
- auto *PtrToSizeArray = CreateArrayForSizeVar(7);
- Args.push_back(PtrToSizeArray);
- ArgTys.push_back(PtrToSizeArray->getType());
+ llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
+ std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
+ Args.push_back(ElemPtr);
+ ArgTys.push_back(ElemPtr->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- llvm::ArrayRef<llvm::Value *>(Args)));
+ auto Call =
+ RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ llvm::ArrayRef<llvm::Value *>(Args)));
+ if (TmpSize)
+ EmitLifetimeEnd(TmpSize, TmpPtr);
+ return Call;
}
LLVM_FALLTHROUGH;
}
@@ -3530,13 +3814,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_os_log_format:
return emitBuiltinOSLogFormat(*E);
- case Builtin::BI__builtin_os_log_format_buffer_size: {
- analyze_os_log::OSLogBufferLayout Layout;
- analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
- return RValue::get(ConstantInt::get(ConvertType(E->getType()),
- Layout.size().getQuantity()));
- }
-
case Builtin::BI__xray_customevent: {
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
@@ -3703,6 +3980,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// we need to do a bit cast.
llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
+ // XXX - vector of pointers?
+ if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
+ if (PtrTy->getAddressSpace() !=
+ ArgValue->getType()->getPointerAddressSpace()) {
+ ArgValue = Builder.CreateAddrSpaceCast(
+ ArgValue,
+ ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ }
+ }
+
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
@@ -3719,6 +4006,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
+ // XXX - vector of pointers?
+ if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
+ if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
+ V = Builder.CreateAddrSpaceCast(
+ V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ }
+ }
+
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
@@ -4286,6 +4581,14 @@ static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
+ NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
+ NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
@@ -5259,6 +5562,34 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
+ case NEON::BI__builtin_neon_vfmlal_low_v:
+ case NEON::BI__builtin_neon_vfmlalq_low_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
+ }
+ case NEON::BI__builtin_neon_vfmlsl_low_v:
+ case NEON::BI__builtin_neon_vfmlslq_low_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
+ }
+ case NEON::BI__builtin_neon_vfmlal_high_v:
+ case NEON::BI__builtin_neon_vfmlalq_high_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
+ }
+ case NEON::BI__builtin_neon_vfmlsl_high_v:
+ case NEON::BI__builtin_neon_vfmlslq_high_v: {
+ llvm::Type *InputTy =
+ llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
+ llvm::Type *Tys[2] = { Ty, InputTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
+ }
}
assert(Int && "Expected valid intrinsic number");
@@ -5506,10 +5837,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
- APSInt Value;
- if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
+ llvm::APSInt Value = Result.Val.getInt();
uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
llvm::InlineAsm *Emit =
@@ -5991,6 +6323,120 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case ARM::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+ case ARM::BI_InterlockedExchangeAdd8_acq:
+ case ARM::BI_InterlockedExchangeAdd16_acq:
+ case ARM::BI_InterlockedExchangeAdd_acq:
+ case ARM::BI_InterlockedExchangeAdd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
+ case ARM::BI_InterlockedExchangeAdd8_rel:
+ case ARM::BI_InterlockedExchangeAdd16_rel:
+ case ARM::BI_InterlockedExchangeAdd_rel:
+ case ARM::BI_InterlockedExchangeAdd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
+ case ARM::BI_InterlockedExchangeAdd8_nf:
+ case ARM::BI_InterlockedExchangeAdd16_nf:
+ case ARM::BI_InterlockedExchangeAdd_nf:
+ case ARM::BI_InterlockedExchangeAdd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
+ case ARM::BI_InterlockedExchange8_acq:
+ case ARM::BI_InterlockedExchange16_acq:
+ case ARM::BI_InterlockedExchange_acq:
+ case ARM::BI_InterlockedExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
+ case ARM::BI_InterlockedExchange8_rel:
+ case ARM::BI_InterlockedExchange16_rel:
+ case ARM::BI_InterlockedExchange_rel:
+ case ARM::BI_InterlockedExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
+ case ARM::BI_InterlockedExchange8_nf:
+ case ARM::BI_InterlockedExchange16_nf:
+ case ARM::BI_InterlockedExchange_nf:
+ case ARM::BI_InterlockedExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case ARM::BI_InterlockedCompareExchange8_acq:
+ case ARM::BI_InterlockedCompareExchange16_acq:
+ case ARM::BI_InterlockedCompareExchange_acq:
+ case ARM::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case ARM::BI_InterlockedCompareExchange8_rel:
+ case ARM::BI_InterlockedCompareExchange16_rel:
+ case ARM::BI_InterlockedCompareExchange_rel:
+ case ARM::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case ARM::BI_InterlockedCompareExchange8_nf:
+ case ARM::BI_InterlockedCompareExchange16_nf:
+ case ARM::BI_InterlockedCompareExchange_nf:
+ case ARM::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
+ case ARM::BI_InterlockedOr8_acq:
+ case ARM::BI_InterlockedOr16_acq:
+ case ARM::BI_InterlockedOr_acq:
+ case ARM::BI_InterlockedOr64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
+ case ARM::BI_InterlockedOr8_rel:
+ case ARM::BI_InterlockedOr16_rel:
+ case ARM::BI_InterlockedOr_rel:
+ case ARM::BI_InterlockedOr64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
+ case ARM::BI_InterlockedOr8_nf:
+ case ARM::BI_InterlockedOr16_nf:
+ case ARM::BI_InterlockedOr_nf:
+ case ARM::BI_InterlockedOr64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
+ case ARM::BI_InterlockedXor8_acq:
+ case ARM::BI_InterlockedXor16_acq:
+ case ARM::BI_InterlockedXor_acq:
+ case ARM::BI_InterlockedXor64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
+ case ARM::BI_InterlockedXor8_rel:
+ case ARM::BI_InterlockedXor16_rel:
+ case ARM::BI_InterlockedXor_rel:
+ case ARM::BI_InterlockedXor64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
+ case ARM::BI_InterlockedXor8_nf:
+ case ARM::BI_InterlockedXor16_nf:
+ case ARM::BI_InterlockedXor_nf:
+ case ARM::BI_InterlockedXor64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
+ case ARM::BI_InterlockedAnd8_acq:
+ case ARM::BI_InterlockedAnd16_acq:
+ case ARM::BI_InterlockedAnd_acq:
+ case ARM::BI_InterlockedAnd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
+ case ARM::BI_InterlockedAnd8_rel:
+ case ARM::BI_InterlockedAnd16_rel:
+ case ARM::BI_InterlockedAnd_rel:
+ case ARM::BI_InterlockedAnd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
+ case ARM::BI_InterlockedAnd8_nf:
+ case ARM::BI_InterlockedAnd16_nf:
+ case ARM::BI_InterlockedAnd_nf:
+ case ARM::BI_InterlockedAnd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+ case ARM::BI_InterlockedIncrement16_acq:
+ case ARM::BI_InterlockedIncrement_acq:
+ case ARM::BI_InterlockedIncrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+ case ARM::BI_InterlockedIncrement16_rel:
+ case ARM::BI_InterlockedIncrement_rel:
+ case ARM::BI_InterlockedIncrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+ case ARM::BI_InterlockedIncrement16_nf:
+ case ARM::BI_InterlockedIncrement_nf:
+ case ARM::BI_InterlockedIncrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
+ case ARM::BI_InterlockedDecrement16_acq:
+ case ARM::BI_InterlockedDecrement_acq:
+ case ARM::BI_InterlockedDecrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
+ case ARM::BI_InterlockedDecrement16_rel:
+ case ARM::BI_InterlockedDecrement_rel:
+ case ARM::BI_InterlockedDecrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
+ case ARM::BI_InterlockedDecrement16_nf:
+ case ARM::BI_InterlockedDecrement_nf:
+ case ARM::BI_InterlockedDecrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
}
// Get the last argument, which specifies the vector type.
@@ -6497,11 +6943,33 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
}
+ if (BuiltinID == AArch64::BI__getReg) {
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ llvm::APSInt Value = Result.Val.getInt();
+ LLVMContext &Context = CGM.getLLVMContext();
+ std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
+
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+ llvm::Value *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ return Builder.CreateCall(F, Metadata);
+ }
+
if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
+ if (BuiltinID == AArch64::BI_ReadWriteBarrier)
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::SyncScope::SingleThread);
+
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
@@ -6564,6 +7032,48 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg) {
+ LLVMContext &Context = CGM.getLLVMContext();
+
+ unsigned SysReg =
+ E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
+
+ std::string SysRegStr;
+ llvm::raw_string_ostream(SysRegStr) <<
+ ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
+ ((SysReg >> 11) & 7) << ":" <<
+ ((SysReg >> 7) & 15) << ":" <<
+ ((SysReg >> 3) & 15) << ":" <<
+ ( SysReg & 7);
+
+ llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+
+ llvm::Type *RegisterType = Int64Ty;
+ llvm::Type *ValueType = Int32Ty;
+ llvm::Type *Types[] = { RegisterType };
+
+ if (BuiltinID == AArch64::BI_ReadStatusReg) {
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
+ llvm::Value *Call = Builder.CreateCall(F, Metadata);
+
+ return Builder.CreateTrunc(Call, ValueType);
+ }
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
+ llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
+ ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
+
+ return Builder.CreateCall(F, { Metadata, ArgValue });
+ }
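A worked example of the bit slicing above (purely illustrative; the field values 3,3,13,0,2 are the op0:op1:CRn:CRm:op2 encoding of TPIDR_EL0, if I have the register encoding right). The snippet packs the fields the way the decode expects and reproduces the string the lowering builds.

#include <cstdio>

int main() {
  // Pack op0=3, op1=3, CRn=13, CRm=0, op2=2 into the layout the decode expects.
  unsigned SysReg = (3u & 1) << 14 | 3u << 11 | 13u << 7 | 0u << 3 | 2u;
  // Decode it exactly as the code above does; prints "3:3:13:0:2".
  printf("%u:%u:%u:%u:%u\n", (1u << 1) | ((SysReg >> 14) & 1),
         (SysReg >> 11) & 7, (SysReg >> 7) & 15, (SysReg >> 3) & 15,
         SysReg & 7);
  return 0;
}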
+
+ if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
+ llvm::Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ return Builder.CreateCall(F);
+ }
+
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
@@ -6659,7 +7169,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
@@ -6679,7 +7189,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u16_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s16_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -6689,7 +7199,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u32_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s32_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -6699,7 +7209,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vcvth_u64_f16:
usgn = true;
- // FALL THROUGH
+ LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s64_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
@@ -8414,6 +8924,129 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case AArch64::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+ case AArch64::BI_InterlockedExchangeAdd8_acq:
+ case AArch64::BI_InterlockedExchangeAdd16_acq:
+ case AArch64::BI_InterlockedExchangeAdd_acq:
+ case AArch64::BI_InterlockedExchangeAdd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
+ case AArch64::BI_InterlockedExchangeAdd8_rel:
+ case AArch64::BI_InterlockedExchangeAdd16_rel:
+ case AArch64::BI_InterlockedExchangeAdd_rel:
+ case AArch64::BI_InterlockedExchangeAdd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
+ case AArch64::BI_InterlockedExchangeAdd8_nf:
+ case AArch64::BI_InterlockedExchangeAdd16_nf:
+ case AArch64::BI_InterlockedExchangeAdd_nf:
+ case AArch64::BI_InterlockedExchangeAdd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
+ case AArch64::BI_InterlockedExchange8_acq:
+ case AArch64::BI_InterlockedExchange16_acq:
+ case AArch64::BI_InterlockedExchange_acq:
+ case AArch64::BI_InterlockedExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
+ case AArch64::BI_InterlockedExchange8_rel:
+ case AArch64::BI_InterlockedExchange16_rel:
+ case AArch64::BI_InterlockedExchange_rel:
+ case AArch64::BI_InterlockedExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
+ case AArch64::BI_InterlockedExchange8_nf:
+ case AArch64::BI_InterlockedExchange16_nf:
+ case AArch64::BI_InterlockedExchange_nf:
+ case AArch64::BI_InterlockedExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
+ case AArch64::BI_InterlockedOr8_acq:
+ case AArch64::BI_InterlockedOr16_acq:
+ case AArch64::BI_InterlockedOr_acq:
+ case AArch64::BI_InterlockedOr64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
+ case AArch64::BI_InterlockedOr8_rel:
+ case AArch64::BI_InterlockedOr16_rel:
+ case AArch64::BI_InterlockedOr_rel:
+ case AArch64::BI_InterlockedOr64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
+ case AArch64::BI_InterlockedOr8_nf:
+ case AArch64::BI_InterlockedOr16_nf:
+ case AArch64::BI_InterlockedOr_nf:
+ case AArch64::BI_InterlockedOr64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
+ case AArch64::BI_InterlockedXor8_acq:
+ case AArch64::BI_InterlockedXor16_acq:
+ case AArch64::BI_InterlockedXor_acq:
+ case AArch64::BI_InterlockedXor64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
+ case AArch64::BI_InterlockedXor8_rel:
+ case AArch64::BI_InterlockedXor16_rel:
+ case AArch64::BI_InterlockedXor_rel:
+ case AArch64::BI_InterlockedXor64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
+ case AArch64::BI_InterlockedXor8_nf:
+ case AArch64::BI_InterlockedXor16_nf:
+ case AArch64::BI_InterlockedXor_nf:
+ case AArch64::BI_InterlockedXor64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
+ case AArch64::BI_InterlockedAnd8_acq:
+ case AArch64::BI_InterlockedAnd16_acq:
+ case AArch64::BI_InterlockedAnd_acq:
+ case AArch64::BI_InterlockedAnd64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
+ case AArch64::BI_InterlockedAnd8_rel:
+ case AArch64::BI_InterlockedAnd16_rel:
+ case AArch64::BI_InterlockedAnd_rel:
+ case AArch64::BI_InterlockedAnd64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
+ case AArch64::BI_InterlockedAnd8_nf:
+ case AArch64::BI_InterlockedAnd16_nf:
+ case AArch64::BI_InterlockedAnd_nf:
+ case AArch64::BI_InterlockedAnd64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
+ case AArch64::BI_InterlockedIncrement16_acq:
+ case AArch64::BI_InterlockedIncrement_acq:
+ case AArch64::BI_InterlockedIncrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
+ case AArch64::BI_InterlockedIncrement16_rel:
+ case AArch64::BI_InterlockedIncrement_rel:
+ case AArch64::BI_InterlockedIncrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
+ case AArch64::BI_InterlockedIncrement16_nf:
+ case AArch64::BI_InterlockedIncrement_nf:
+ case AArch64::BI_InterlockedIncrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
+ case AArch64::BI_InterlockedDecrement16_acq:
+ case AArch64::BI_InterlockedDecrement_acq:
+ case AArch64::BI_InterlockedDecrement64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
+ case AArch64::BI_InterlockedDecrement16_rel:
+ case AArch64::BI_InterlockedDecrement_rel:
+ case AArch64::BI_InterlockedDecrement64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
+ case AArch64::BI_InterlockedDecrement16_nf:
+ case AArch64::BI_InterlockedDecrement_nf:
+ case AArch64::BI_InterlockedDecrement64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
+
+ case AArch64::BI_InterlockedAdd: {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add, Arg0, Arg1,
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, Arg1);
+ }
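A behavioural sketch of the case above using std::atomic rather than the intrinsic itself (interlocked_add_like is a hypothetical helper): _InterlockedAdd is documented to return the new value, which is why the old value produced by the atomicrmw is re-incremented by Arg1 before being returned.

#include <atomic>
#include <cstdio>

// fetch_add returns the old value, so add the operand back to get the new one.
static long interlocked_add_like(std::atomic<long> &x, long v) {
  return x.fetch_add(v, std::memory_order_seq_cst) + v;
}

int main() {
  std::atomic<long> x{2};
  long r = interlocked_add_like(x, 5);
  printf("%ld %ld\n", r, x.load());  // 7 7
  return 0;
}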
}
}
@@ -8524,8 +9157,9 @@ static Value *EmitX86CompressStore(CodeGenFunction &CGF,
}
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
- unsigned NumElts, ArrayRef<Value *> Ops,
+ ArrayRef<Value *> Ops,
bool InvertLHS = false) {
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
@@ -8533,7 +9167,25 @@ static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
LHS = CGF.Builder.CreateNot(LHS);
return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
- CGF.Builder.getIntNTy(std::max(NumElts, 8U)));
+ Ops[0]->getType());
+}
+
+static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
+ Value *Amt, bool IsRight) {
+ llvm::Type *Ty = Op0->getType();
+
+ // Amount may be scalar immediate, in which case create a splat vector.
+ // Funnel shift amounts are treated as modulo and types are all power-of-2, so
+ // we only care about the lowest log2 bits anyway.
+ if (Amt->getType() != Ty) {
+ unsigned NumElts = Ty->getVectorNumElements();
+ Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+ Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
+ }
+
+ unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Value *F = CGF.CGM.getIntrinsic(IID, Ty);
+ return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}
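A per-lane sketch of what a splatted funnel-shift rotate computes, written with clang/GCC vector extensions rather than the real AVX-512 intrinsics (rol3_lanes is a hypothetical helper). It also illustrates the modulo note above: rotating 32-bit lanes by 35 behaves exactly like rotating by 3.

#include <cstdio>

typedef unsigned v4u __attribute__((vector_size(16)));

// What fshl(x, x, splat(3)) computes per 32-bit lane.
static v4u rol3_lanes(v4u x) { return (x << 3) | (x >> 29); }

int main() {
  v4u x = {0x80000001u, 2, 3, 4};
  v4u r = rol3_lanes(x);
  printf("%u %u\n", r[0], r[1]);  // 12 16
  return 0;
}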
static Value *EmitX86Select(CodeGenFunction &CGF,
@@ -8855,6 +9507,17 @@ static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
+// Emit addition or subtraction with signed/unsigned saturation.
+static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
+ ArrayRef<Value *> Ops, bool IsSigned,
+ bool IsAddition) {
+ Intrinsic::ID IID =
+ IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
+ : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
+ llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
+ return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
+}
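A scalar sketch of the per-lane behaviour the new sadd.sat/usub.sat lowering provides (sadd_sat_i8 is a hypothetical helper, not one of the vector intrinsics): results clamp to the element type's range instead of wrapping.

#include <cstdio>

// Per-lane behaviour of sadd.sat on i8: clamp to [-128, 127] instead of wrapping.
static signed char sadd_sat_i8(signed char a, signed char b) {
  int s = a + b;
  return (signed char)(s > 127 ? 127 : s < -128 ? -128 : s);
}

int main() {
  printf("%d %d\n", sadd_sat_i8(120, 20), sadd_sat_i8(-100, -50));  // 127 -128
  return 0;
}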
+
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
@@ -8876,6 +9539,7 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+ cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
// Calculate the index needed to access the correct field based on the
// range. Also adjust the expected value.
@@ -8911,17 +9575,17 @@ Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
return EmitX86CpuSupports(FeatureStr);
}
-uint32_t
+uint64_t
CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
// Processor features and mapping to processor feature value.
- uint32_t FeaturesMask = 0;
+ uint64_t FeaturesMask = 0;
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
#include "llvm/Support/X86TargetParser.def"
;
- FeaturesMask |= (1U << Feature);
+ FeaturesMask |= (1ULL << Feature);
}
return FeaturesMask;
}
@@ -8930,37 +9594,66 @@ Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
}
-llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint32_t FeaturesMask) {
- // Matching the struct layout from the compiler-rt/libgcc structure that is
- // filled in:
- // unsigned int __cpu_vendor;
- // unsigned int __cpu_type;
- // unsigned int __cpu_subtype;
- // unsigned int __cpu_features[1];
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
- llvm::ArrayType::get(Int32Ty, 1));
+llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
+ uint32_t Features1 = Lo_32(FeaturesMask);
+ uint32_t Features2 = Hi_32(FeaturesMask);
- // Grab the global __cpu_model.
- llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+ Value *Result = Builder.getTrue();
+
+ if (Features1 != 0) {
+ // Matching the struct layout from the compiler-rt/libgcc structure that is
+ // filled in:
+ // unsigned int __cpu_vendor;
+ // unsigned int __cpu_type;
+ // unsigned int __cpu_subtype;
+ // unsigned int __cpu_features[1];
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
+ llvm::ArrayType::get(Int32Ty, 1));
+
+ // Grab the global __cpu_model.
+ llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
+ cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
+
+ // Grab the first (0th) element from the field __cpu_features off of the
+ // global in the struct STy.
+ Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
+ Builder.getInt32(0)};
+ Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
+ Value *Features =
+ Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
+
+ // Check the value of the bit corresponding to the feature requested.
+ Value *Mask = Builder.getInt32(Features1);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, Cmp);
+ }
- // Grab the first (0th) element from the field __cpu_features off of the
- // global in the struct STy.
- Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 3),
- ConstantInt::get(Int32Ty, 0)};
- Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
-
- // Check the value of the bit corresponding to the feature requested.
- Value *Bitset = Builder.CreateAnd(
- Features, llvm::ConstantInt::get(Int32Ty, FeaturesMask));
- return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
+ if (Features2 != 0) {
+ llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
+ "__cpu_features2");
+ cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
+
+ Value *Features =
+ Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
+
+ // Check the value of the bit corresponding to the feature requested.
+ Value *Mask = Builder.getInt32(Features2);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, Cmp);
+ }
+
+ return Result;
}
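A dispatch sketch showing why the mask grew to 64 bits: each __builtin_cpu_supports call tests a single feature bit, reading bits below 32 from __cpu_model's __cpu_features[0] and bits 32 and up from the new __cpu_features2 variable. The exact feature-to-bit mapping comes from X86TargetParser.def, so treat "avx512vbmi2" below as an illustrative feature name rather than a guarantee of which word it lands in.

int have_avx2(void)  { return __builtin_cpu_supports("avx2"); }
int have_vbmi2(void) { return __builtin_cpu_supports("avx512vbmi2"); }

int dispatch(void) {
  if (have_avx2() && have_vbmi2())
    return 2;  // wide path
  return 1;    // portable path
}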
Value *CodeGenFunction::EmitX86CpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
/*Variadic*/ false);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
+ cast<llvm::GlobalValue>(Func)->setDSOLocal(true);
+ cast<llvm::GlobalValue>(Func)->setDLLStorageClass(
+ llvm::GlobalValue::DefaultStorageClass);
return Builder.CreateCall(Func);
}
@@ -9051,6 +9744,24 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__rdtsc: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
}
+ case X86::BI__builtin_ia32_rdtscp: {
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
+ Ops[0]);
+ return Builder.CreateExtractValue(Call, 0);
+ }
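A usage sketch of the new __rdtscp lowering (x86 only; assumes <x86intrin.h> provides the __rdtscp wrapper): the timestamp is the return value and the IA32_TSC_AUX value comes back through the pointer argument, matching the two extractvalues above.

#include <cstdio>
#include <x86intrin.h>

int main() {
  unsigned aux = 0;
  unsigned long long tsc = __rdtscp(&aux);  // extractvalue 0: the TSC itself
  printf("tsc=%llu aux=%u\n", tsc, aux);    // extractvalue 1 was stored to aux
  return 0;
}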
+ case X86::BI__builtin_ia32_lzcnt_u16:
+ case X86::BI__builtin_ia32_lzcnt_u32:
+ case X86::BI__builtin_ia32_lzcnt_u64: {
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ }
+ case X86::BI__builtin_ia32_tzcnt_u16:
+ case X86::BI__builtin_ia32_tzcnt_u32:
+ case X86::BI__builtin_ia32_tzcnt_u64: {
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ }
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
@@ -9822,6 +10533,50 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
"psrldq");
return Builder.CreateBitCast(SV, ResultType, "cast");
}
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+
+ if (ShiftVal >= NumElts)
+ return llvm::Constant::getNullValue(Ops[0]->getType());
+
+ Value *In = getMaskVecValue(*this, Ops[0], NumElts);
+
+ uint32_t Indices[64];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = NumElts + i - ShiftVal;
+
+ Value *Zero = llvm::Constant::getNullValue(In->getType());
+ Value *SV = Builder.CreateShuffleVector(Zero, In,
+ makeArrayRef(Indices, NumElts),
+ "kshiftl");
+ return Builder.CreateBitCast(SV, Ops[0]->getType());
+ }
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi: {
+ unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+
+ if (ShiftVal >= NumElts)
+ return llvm::Constant::getNullValue(Ops[0]->getType());
+
+ Value *In = getMaskVecValue(*this, Ops[0], NumElts);
+
+ uint32_t Indices[64];
+ for (unsigned i = 0; i != NumElts; ++i)
+ Indices[i] = i + ShiftVal;
+
+ Value *Zero = llvm::Constant::getNullValue(In->getType());
+ Value *SV = Builder.CreateShuffleVector(In, Zero,
+ makeArrayRef(Indices, NumElts),
+ "kshiftr");
+ return Builder.CreateBitCast(SV, Ops[0]->getType());
+ }
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
@@ -9847,7 +10602,41 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
SI->setAlignment(1);
return SI;
}
-
+ // Rotate is a special case of funnel shift - 1st 2 args are the same.
+ case X86::BI__builtin_ia32_vprotb:
+ case X86::BI__builtin_ia32_vprotw:
+ case X86::BI__builtin_ia32_vprotd:
+ case X86::BI__builtin_ia32_vprotq:
+ case X86::BI__builtin_ia32_vprotbi:
+ case X86::BI__builtin_ia32_vprotwi:
+ case X86::BI__builtin_ia32_vprotdi:
+ case X86::BI__builtin_ia32_vprotqi:
+ case X86::BI__builtin_ia32_prold128:
+ case X86::BI__builtin_ia32_prold256:
+ case X86::BI__builtin_ia32_prold512:
+ case X86::BI__builtin_ia32_prolq128:
+ case X86::BI__builtin_ia32_prolq256:
+ case X86::BI__builtin_ia32_prolq512:
+ case X86::BI__builtin_ia32_prolvd128:
+ case X86::BI__builtin_ia32_prolvd256:
+ case X86::BI__builtin_ia32_prolvd512:
+ case X86::BI__builtin_ia32_prolvq128:
+ case X86::BI__builtin_ia32_prolvq256:
+ case X86::BI__builtin_ia32_prolvq512:
+ return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
+ case X86::BI__builtin_ia32_prord128:
+ case X86::BI__builtin_ia32_prord256:
+ case X86::BI__builtin_ia32_prord512:
+ case X86::BI__builtin_ia32_prorq128:
+ case X86::BI__builtin_ia32_prorq256:
+ case X86::BI__builtin_ia32_prorq512:
+ case X86::BI__builtin_ia32_prorvd128:
+ case X86::BI__builtin_ia32_prorvd256:
+ case X86::BI__builtin_ia32_prorvd512:
+ case X86::BI__builtin_ia32_prorvq128:
+ case X86::BI__builtin_ia32_prorvq256:
+ case X86::BI__builtin_ia32_prorvq512:
+ return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
case X86::BI__builtin_ia32_selectb_128:
case X86::BI__builtin_ia32_selectb_256:
case X86::BI__builtin_ia32_selectb_512:
@@ -9905,38 +10694,147 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86MaskedCompare(*this, CC, false, Ops);
}
+ case X86::BI__builtin_ia32_kortestcqi:
case X86::BI__builtin_ia32_kortestchi:
- case X86::BI__builtin_ia32_kortestzhi: {
- Value *Or = EmitX86MaskLogic(*this, Instruction::Or, 16, Ops);
- Value *C;
- if (BuiltinID == X86::BI__builtin_ia32_kortestchi)
- C = llvm::Constant::getAllOnesValue(Builder.getInt16Ty());
- else
- C = llvm::Constant::getNullValue(Builder.getInt16Ty());
+ case X86::BI__builtin_ia32_kortestcsi:
+ case X86::BI__builtin_ia32_kortestcdi: {
+ Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
+ Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
Value *Cmp = Builder.CreateICmpEQ(Or, C);
return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
}
+ case X86::BI__builtin_ia32_kortestzqi:
+ case X86::BI__builtin_ia32_kortestzhi:
+ case X86::BI__builtin_ia32_kortestzsi:
+ case X86::BI__builtin_ia32_kortestzdi: {
+ Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
+ Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
+ Value *Cmp = Builder.CreateICmpEQ(Or, C);
+ return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
+ }
+
+ case X86::BI__builtin_ia32_ktestcqi:
+ case X86::BI__builtin_ia32_ktestzqi:
+ case X86::BI__builtin_ia32_ktestchi:
+ case X86::BI__builtin_ia32_ktestzhi:
+ case X86::BI__builtin_ia32_ktestcsi:
+ case X86::BI__builtin_ia32_ktestzsi:
+ case X86::BI__builtin_ia32_ktestcdi:
+ case X86::BI__builtin_ia32_ktestzdi: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_ktestcqi:
+ IID = Intrinsic::x86_avx512_ktestc_b;
+ break;
+ case X86::BI__builtin_ia32_ktestzqi:
+ IID = Intrinsic::x86_avx512_ktestz_b;
+ break;
+ case X86::BI__builtin_ia32_ktestchi:
+ IID = Intrinsic::x86_avx512_ktestc_w;
+ break;
+ case X86::BI__builtin_ia32_ktestzhi:
+ IID = Intrinsic::x86_avx512_ktestz_w;
+ break;
+ case X86::BI__builtin_ia32_ktestcsi:
+ IID = Intrinsic::x86_avx512_ktestc_d;
+ break;
+ case X86::BI__builtin_ia32_ktestzsi:
+ IID = Intrinsic::x86_avx512_ktestz_d;
+ break;
+ case X86::BI__builtin_ia32_ktestcdi:
+ IID = Intrinsic::x86_avx512_ktestc_q;
+ break;
+ case X86::BI__builtin_ia32_ktestzdi:
+ IID = Intrinsic::x86_avx512_ktestz_q;
+ break;
+ }
+
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+ Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
+ Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
+ Function *Intr = CGM.getIntrinsic(IID);
+ return Builder.CreateCall(Intr, {LHS, RHS});
+ }
+ case X86::BI__builtin_ia32_kaddqi:
+ case X86::BI__builtin_ia32_kaddhi:
+ case X86::BI__builtin_ia32_kaddsi:
+ case X86::BI__builtin_ia32_kadddi: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_kaddqi:
+ IID = Intrinsic::x86_avx512_kadd_b;
+ break;
+ case X86::BI__builtin_ia32_kaddhi:
+ IID = Intrinsic::x86_avx512_kadd_w;
+ break;
+ case X86::BI__builtin_ia32_kaddsi:
+ IID = Intrinsic::x86_avx512_kadd_d;
+ break;
+ case X86::BI__builtin_ia32_kadddi:
+ IID = Intrinsic::x86_avx512_kadd_q;
+ break;
+ }
+
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+ Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
+ Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
+ Function *Intr = CGM.getIntrinsic(IID);
+ Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
+ return Builder.CreateBitCast(Res, Ops[0]->getType());
+ }
+ case X86::BI__builtin_ia32_kandqi:
case X86::BI__builtin_ia32_kandhi:
- return EmitX86MaskLogic(*this, Instruction::And, 16, Ops);
+ case X86::BI__builtin_ia32_kandsi:
+ case X86::BI__builtin_ia32_kanddi:
+ return EmitX86MaskLogic(*this, Instruction::And, Ops);
+ case X86::BI__builtin_ia32_kandnqi:
case X86::BI__builtin_ia32_kandnhi:
- return EmitX86MaskLogic(*this, Instruction::And, 16, Ops, true);
+ case X86::BI__builtin_ia32_kandnsi:
+ case X86::BI__builtin_ia32_kandndi:
+ return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
+ case X86::BI__builtin_ia32_korqi:
case X86::BI__builtin_ia32_korhi:
- return EmitX86MaskLogic(*this, Instruction::Or, 16, Ops);
+ case X86::BI__builtin_ia32_korsi:
+ case X86::BI__builtin_ia32_kordi:
+ return EmitX86MaskLogic(*this, Instruction::Or, Ops);
+ case X86::BI__builtin_ia32_kxnorqi:
case X86::BI__builtin_ia32_kxnorhi:
- return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops, true);
+ case X86::BI__builtin_ia32_kxnorsi:
+ case X86::BI__builtin_ia32_kxnordi:
+ return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
+ case X86::BI__builtin_ia32_kxorqi:
case X86::BI__builtin_ia32_kxorhi:
- return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops);
- case X86::BI__builtin_ia32_knothi: {
- Ops[0] = getMaskVecValue(*this, Ops[0], 16);
- return Builder.CreateBitCast(Builder.CreateNot(Ops[0]),
- Builder.getInt16Ty());
+ case X86::BI__builtin_ia32_kxorsi:
+ case X86::BI__builtin_ia32_kxordi:
+ return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
+ case X86::BI__builtin_ia32_knotqi:
+ case X86::BI__builtin_ia32_knothi:
+ case X86::BI__builtin_ia32_knotsi:
+ case X86::BI__builtin_ia32_knotdi: {
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+ Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
+ return Builder.CreateBitCast(Builder.CreateNot(Res),
+ Ops[0]->getType());
+ }
+ case X86::BI__builtin_ia32_kmovb:
+ case X86::BI__builtin_ia32_kmovw:
+ case X86::BI__builtin_ia32_kmovd:
+ case X86::BI__builtin_ia32_kmovq: {
+ // Bitcast to vXi1 type and then back to integer. This gets the mask
+ // register type into the IR, but might be optimized out depending on
+ // what's around it.
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
+ Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
+ return Builder.CreateBitCast(Res, Ops[0]->getType());
}
case X86::BI__builtin_ia32_kunpckdi:
case X86::BI__builtin_ia32_kunpcksi:
case X86::BI__builtin_ia32_kunpckhi: {
- unsigned NumElts = Ops[0]->getType()->getScalarSizeInBits();
+ unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
uint32_t Indices[64];
@@ -10103,6 +11001,52 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_pternlogq256_maskz:
return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
+ case X86::BI__builtin_ia32_vpshldd128:
+ case X86::BI__builtin_ia32_vpshldd256:
+ case X86::BI__builtin_ia32_vpshldd512:
+ case X86::BI__builtin_ia32_vpshldq128:
+ case X86::BI__builtin_ia32_vpshldq256:
+ case X86::BI__builtin_ia32_vpshldq512:
+ case X86::BI__builtin_ia32_vpshldw128:
+ case X86::BI__builtin_ia32_vpshldw256:
+ case X86::BI__builtin_ia32_vpshldw512:
+ return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
+
+ case X86::BI__builtin_ia32_vpshrdd128:
+ case X86::BI__builtin_ia32_vpshrdd256:
+ case X86::BI__builtin_ia32_vpshrdd512:
+ case X86::BI__builtin_ia32_vpshrdq128:
+ case X86::BI__builtin_ia32_vpshrdq256:
+ case X86::BI__builtin_ia32_vpshrdq512:
+ case X86::BI__builtin_ia32_vpshrdw128:
+ case X86::BI__builtin_ia32_vpshrdw256:
+ case X86::BI__builtin_ia32_vpshrdw512:
+ // Ops 0 and 1 are swapped.
+ return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
+
+ case X86::BI__builtin_ia32_vpshldvd128:
+ case X86::BI__builtin_ia32_vpshldvd256:
+ case X86::BI__builtin_ia32_vpshldvd512:
+ case X86::BI__builtin_ia32_vpshldvq128:
+ case X86::BI__builtin_ia32_vpshldvq256:
+ case X86::BI__builtin_ia32_vpshldvq512:
+ case X86::BI__builtin_ia32_vpshldvw128:
+ case X86::BI__builtin_ia32_vpshldvw256:
+ case X86::BI__builtin_ia32_vpshldvw512:
+ return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
+
+ case X86::BI__builtin_ia32_vpshrdvd128:
+ case X86::BI__builtin_ia32_vpshrdvd256:
+ case X86::BI__builtin_ia32_vpshrdvd512:
+ case X86::BI__builtin_ia32_vpshrdvq128:
+ case X86::BI__builtin_ia32_vpshrdvq256:
+ case X86::BI__builtin_ia32_vpshrdvq512:
+ case X86::BI__builtin_ia32_vpshrdvw128:
+ case X86::BI__builtin_ia32_vpshrdvw256:
+ case X86::BI__builtin_ia32_vpshrdvw512:
+ // Ops 0 and 1 are swapped.
+ return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
+
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
@@ -10145,6 +11089,33 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
+ case X86::BI__builtin_ia32_addcarryx_u32:
+ case X86::BI__builtin_ia32_addcarryx_u64:
+ case X86::BI__builtin_ia32_subborrow_u32:
+ case X86::BI__builtin_ia32_subborrow_u64: {
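+ // Usage sketch (illustrative, not part of this change): these back the
+ // ADX intrinsics, e.g.
+ //   unsigned int Sum;
+ //   unsigned char CarryOut = _addcarryx_u32(/*CarryIn*/0, A, B, &Sum);
+ // which now emits llvm.x86.addcarry.32 plus a store of the carry flag.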
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_addcarryx_u32:
+ IID = Intrinsic::x86_addcarry_32;
+ break;
+ case X86::BI__builtin_ia32_addcarryx_u64:
+ IID = Intrinsic::x86_addcarry_64;
+ break;
+ case X86::BI__builtin_ia32_subborrow_u32:
+ IID = Intrinsic::x86_subborrow_32;
+ break;
+ case X86::BI__builtin_ia32_subborrow_u64:
+ IID = Intrinsic::x86_subborrow_64;
+ break;
+ }
+
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
+ { Ops[0], Ops[1], Ops[2] });
+ Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
+ Ops[3]);
+ return Builder.CreateExtractValue(Call, 0);
+ }
case X86::BI__builtin_ia32_fpclassps128_mask:
case X86::BI__builtin_ia32_fpclassps256_mask:
@@ -10183,6 +11154,51 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
}
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ case X86::BI__builtin_ia32_vpmultishiftqb512: {
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_vpmultishiftqb128:
+ ID = Intrinsic::x86_avx512_pmultishift_qb_128;
+ break;
+ case X86::BI__builtin_ia32_vpmultishiftqb256:
+ ID = Intrinsic::x86_avx512_pmultishift_qb_256;
+ break;
+ case X86::BI__builtin_ia32_vpmultishiftqb512:
+ ID = Intrinsic::x86_avx512_pmultishift_qb_512;
+ break;
+ }
+
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
+
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
+ unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+ Value *MaskIn = Ops[2];
+ Ops.erase(&Ops[2]);
+
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+ ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
+ break;
+ case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+ ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
+ break;
+ case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+ ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
+ break;
+ }
+
+ Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
+ }
+
// packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
@@ -10361,6 +11377,27 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::System);
}
+ case X86::BI__shiftleft128:
+ case X86::BI__shiftright128: {
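+ // Worked example (illustrative): __shiftleft128 yields the high half of
+ // (HighPart:LowPart) << (Shift & 63), so __shiftleft128(/*Low*/0, /*High*/1, 4)
+ // returns 16; __shiftright128 yields the low half of the right shift.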
+ // FIXME: Once fshl/fshr no longer add an unneeded AND and CMOV, do this:
+ // llvm::Function *F = CGM.getIntrinsic(
+ // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
+ // Int64Ty);
+ // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+ // return Builder.CreateCall(F, Ops);
+ llvm::Type *Int128Ty = Builder.getInt128Ty();
+ Value *Val = Builder.CreateOr(
+ Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64),
+ Builder.CreateZExt(Ops[0], Int128Ty));
+ Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
+ llvm::ConstantInt::get(Int128Ty, 0x3f));
+ Value *Res;
+ if (BuiltinID == X86::BI__shiftleft128)
+ Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
+ else
+ Res = Builder.CreateLShr(Val, Amt);
+ return Builder.CreateTrunc(Res, Int64Ty);
+ }
case X86::BI_ReadWriteBarrier:
case X86::BI_ReadBarrier:
case X86::BI_WriteBarrier: {
@@ -10401,14 +11438,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
Value *Destination =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PtrTy);
- Value *ExchangeHigh128 =
- Builder.CreateZExt(EmitScalarExpr(E->getArg(1)), Int128Ty);
- Value *ExchangeLow128 =
- Builder.CreateZExt(EmitScalarExpr(E->getArg(2)), Int128Ty);
- Address ComparandResult(
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int128PtrTy),
- getContext().toCharUnitsFromBits(128));
+ Builder.CreateBitCast(Ops[0], Int128PtrTy);
+ Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
+ Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
+ Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
+ getContext().toCharUnitsFromBits(128));
Value *Exchange = Builder.CreateOr(
Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
@@ -10459,8 +11493,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readfsdword:
case X86::BI__readfsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
- llvm::PointerType::get(IntTy, 257));
+ Value *Ptr =
+ Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
@@ -10471,17 +11505,44 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readgsdword:
case X86::BI__readgsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
- llvm::PointerType::get(IntTy, 256));
+ Value *Ptr =
+ Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
+ case X86::BI__builtin_ia32_paddsb512:
+ case X86::BI__builtin_ia32_paddsw512:
+ case X86::BI__builtin_ia32_paddsb256:
+ case X86::BI__builtin_ia32_paddsw256:
+ case X86::BI__builtin_ia32_paddsb128:
+ case X86::BI__builtin_ia32_paddsw128:
+ return EmitX86AddSubSatExpr(*this, Ops, true, true);
+ case X86::BI__builtin_ia32_paddusb512:
+ case X86::BI__builtin_ia32_paddusw512:
+ case X86::BI__builtin_ia32_paddusb256:
+ case X86::BI__builtin_ia32_paddusw256:
+ case X86::BI__builtin_ia32_paddusb128:
+ case X86::BI__builtin_ia32_paddusw128:
+ return EmitX86AddSubSatExpr(*this, Ops, false, true);
+ case X86::BI__builtin_ia32_psubsb512:
+ case X86::BI__builtin_ia32_psubsw512:
+ case X86::BI__builtin_ia32_psubsb256:
+ case X86::BI__builtin_ia32_psubsw256:
+ case X86::BI__builtin_ia32_psubsb128:
+ case X86::BI__builtin_ia32_psubsw128:
+ return EmitX86AddSubSatExpr(*this, Ops, true, false);
+ case X86::BI__builtin_ia32_psubusb512:
+ case X86::BI__builtin_ia32_psubusw512:
+ case X86::BI__builtin_ia32_psubusb256:
+ case X86::BI__builtin_ia32_psubusw256:
+ case X86::BI__builtin_ia32_psubusb128:
+ case X86::BI__builtin_ia32_psubusw128:
+ return EmitX86AddSubSatExpr(*this, Ops, false, false);
}
}
-
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
@@ -10901,6 +11962,28 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
+
+ case PPC::BI__builtin_pack_vector_int128: {
+ bool isLittleEndian = getTarget().isLittleEndian();
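+ // Descriptive note (added for clarity): the insert indices below are
+ // swapped on little-endian targets so that Ops[0] and Ops[1] land in the
+ // same logical halves of the packed value regardless of endianness.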
+ Value *UndefValue =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
+ Value *Res = Builder.CreateInsertElement(
+ UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
+ Res = Builder.CreateInsertElement(Res, Ops[1],
+ (uint64_t)(isLittleEndian ? 0 : 1));
+ return Builder.CreateBitCast(Res, ConvertType(E->getType()));
+ }
+
+ case PPC::BI__builtin_unpack_vector_int128: {
+ ConstantInt *Index = cast<ConstantInt>(Ops[1]);
+ Value *Unpacked = Builder.CreateBitCast(
+ Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
+
+ if (getTarget().isLittleEndian())
+ Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
+
+ return Builder.CreateExtractElement(Unpacked, Index);
+ }
}
}
@@ -10948,12 +12031,16 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
- case AMDGPU::BI__builtin_amdgcn_mov_dpp: {
- llvm::SmallVector<llvm::Value *, 5> Args;
- for (unsigned I = 0; I != 5; ++I)
+ case AMDGPU::BI__builtin_amdgcn_mov_dpp:
+ case AMDGPU::BI__builtin_amdgcn_update_dpp: {
+ llvm::SmallVector<llvm::Value *, 6> Args;
+ for (unsigned I = 0; I != E->getNumArgs(); ++I)
Args.push_back(EmitScalarExpr(E->getArg(I)));
- Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_mov_dpp,
- Args[0]->getType());
+ assert(Args.size() == 5 || Args.size() == 6);
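+ // Added explanatory comment: the 5-argument mov_dpp form carries no "old"
+ // operand, so an undef value is prepended to match update_dpp's 6-operand
+ // intrinsic signature.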
+ if (Args.size() == 5)
+ Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
+ Value *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
}
case AMDGPU::BI__builtin_amdgcn_div_fixup:
@@ -11039,50 +12126,6 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CI->setConvergent();
return CI;
}
- case AMDGPU::BI__builtin_amdgcn_ds_faddf:
- case AMDGPU::BI__builtin_amdgcn_ds_fminf:
- case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
- llvm::SmallVector<llvm::Value *, 5> Args;
- for (unsigned I = 0; I != 5; ++I)
- Args.push_back(EmitScalarExpr(E->getArg(I)));
- const llvm::Type *PtrTy = Args[0]->getType();
- // check pointer parameter
- if (!PtrTy->isPointerTy() ||
- E->getArg(0)
- ->getType()
- ->getPointeeType()
- .getQualifiers()
- .getAddressSpace() != LangAS::opencl_local ||
- !PtrTy->getPointerElementType()->isFloatTy()) {
- CGM.Error(E->getArg(0)->getLocStart(),
- "parameter should have type \"local float*\"");
- return nullptr;
- }
- // check float parameter
- if (!Args[1]->getType()->isFloatTy()) {
- CGM.Error(E->getArg(1)->getLocStart(),
- "parameter should have type \"float\"");
- return nullptr;
- }
-
- Intrinsic::ID ID;
- switch (BuiltinID) {
- case AMDGPU::BI__builtin_amdgcn_ds_faddf:
- ID = Intrinsic::amdgcn_ds_fadd;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_fminf:
- ID = Intrinsic::amdgcn_ds_fmin;
- break;
- case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
- ID = Intrinsic::amdgcn_ds_fmax;
- break;
- default:
- llvm_unreachable("Unknown BuiltinID");
- }
- Value *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, Args);
- }
-
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
@@ -11363,7 +12406,7 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {X, Y, M4Value});
}
- // Vector intrisincs that output the post-instruction CC value.
+ // Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
case SystemZ::BI__builtin_##NAME: \
@@ -11823,7 +12866,7 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
bool isColMajor = isColMajorArg.getSExtValue();
unsigned IID;
unsigned NumResults = 8;
- // PTX Instructions (and LLVM instrinsics) are defined for slice _d_, yet
+ // PTX Instructions (and LLVM intrinsics) are defined for slice _d_, yet
// for some reason nvcc builtins use _c_.
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
@@ -12046,31 +13089,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
- case WebAssembly::BI__builtin_wasm_mem_size: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *I = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_size, ResultType);
- return Builder.CreateCall(Callee, I);
- }
- case WebAssembly::BI__builtin_wasm_mem_grow: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Args[] = {
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1))
- };
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_grow, ResultType);
- return Builder.CreateCall(Callee, Args);
- }
- case WebAssembly::BI__builtin_wasm_current_memory: {
- llvm::Type *ResultType = ConvertType(E->getType());
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_current_memory, ResultType);
- return Builder.CreateCall(Callee);
- }
- case WebAssembly::BI__builtin_wasm_grow_memory: {
- Value *X = EmitScalarExpr(E->getArg(0));
- Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType());
- return Builder.CreateCall(Callee, X);
- }
case WebAssembly::BI__builtin_wasm_throw: {
Value *Tag = EmitScalarExpr(E->getArg(0));
Value *Obj = EmitScalarExpr(E->getArg(1));
@@ -12081,6 +13099,211 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
return Builder.CreateCall(Callee);
}
+ case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
+ Value *Addr = EmitScalarExpr(E->getArg(0));
+ Value *Expected = EmitScalarExpr(E->getArg(1));
+ Value *Timeout = EmitScalarExpr(E->getArg(2));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
+ return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
+ }
+ case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
+ Value *Addr = EmitScalarExpr(E->getArg(0));
+ Value *Expected = EmitScalarExpr(E->getArg(1));
+ Value *Timeout = EmitScalarExpr(E->getArg(2));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
+ return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
+ }
+ case WebAssembly::BI__builtin_wasm_atomic_notify: {
+ Value *Addr = EmitScalarExpr(E->getArg(0));
+ Value *Count = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
+ return Builder.CreateCall(Callee, {Addr, Count});
+ }
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
+ Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ResT = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
+ {ResT, Src->getType()});
+ return Builder.CreateCall(Callee, {Src});
+ }
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
+ Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ResT = ConvertType(E->getType());
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
+ {ResT, Src->getType()});
+ return Builder.CreateCall(Callee, {Src});
+ }
+ case WebAssembly::BI__builtin_wasm_min_f32:
+ case WebAssembly::BI__builtin_wasm_min_f64:
+ case WebAssembly::BI__builtin_wasm_min_f32x4:
+ case WebAssembly::BI__builtin_wasm_min_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::minimum,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_max_f32:
+ case WebAssembly::BI__builtin_wasm_max_f64:
+ case WebAssembly::BI__builtin_wasm_max_f32x4:
+ case WebAssembly::BI__builtin_wasm_max_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::maximum,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ Value *Extract = Builder.CreateExtractElement(Vec, Lane);
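+ // Usage sketch (illustrative, not part of this change):
+ //   int X = __builtin_wasm_extract_lane_s_i8x16(V, 0);  // sign-extended
+ // The lane index must be an integer constant expression, as checked above.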
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
+ return Builder.CreateSExt(Extract, ConvertType(E->getType()));
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
+ return Builder.CreateZExt(Extract, ConvertType(E->getType()));
+ case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
+ return Extract;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ }
+ case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
+ llvm::APSInt LaneConst;
+ if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
+ llvm_unreachable("Constant arg isn't actually constant?");
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
+ llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
+ Value *Trunc = Builder.CreateTrunc(Val, ElemType);
+ return Builder.CreateInsertElement(Vec, Trunc, Lane);
+ }
+ case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
+ case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
+ return Builder.CreateInsertElement(Vec, Val, Lane);
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ }
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
+ IntNo = Intrinsic::sadd_sat;
+ break;
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
+ IntNo = Intrinsic::uadd_sat;
+ break;
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
+ IntNo = Intrinsic::wasm_sub_saturate_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
+ IntNo = Intrinsic::wasm_sub_saturate_unsigned;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_bitselect: {
+ Value *V1 = EmitScalarExpr(E->getArg(0));
+ Value *V2 = EmitScalarExpr(E->getArg(1));
+ Value *C = EmitScalarExpr(E->getArg(2));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
+ ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {V1, V2, C});
+ }
+ case WebAssembly::BI__builtin_wasm_any_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_any_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_any_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ case WebAssembly::BI__builtin_wasm_all_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_all_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_all_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_any_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_any_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_any_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ IntNo = Intrinsic::wasm_anytrue;
+ break;
+ case WebAssembly::BI__builtin_wasm_all_true_i8x16:
+ case WebAssembly::BI__builtin_wasm_all_true_i16x8:
+ case WebAssembly::BI__builtin_wasm_all_true_i32x4:
+ case WebAssembly::BI__builtin_wasm_all_true_i64x2:
+ IntNo = Intrinsic::wasm_alltrue;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_abs_f32x4:
+ case WebAssembly::BI__builtin_wasm_abs_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
+ case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ Value *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
+ return Builder.CreateCall(Callee, {Vec});
+ }
default:
return nullptr;
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 5fcc9e011bcb..1c578bd151bd 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -137,7 +137,7 @@ CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
- RelocatableDeviceCode(CGM.getLangOpts().CUDARelocatableDeviceCode) {
+ RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -353,8 +353,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// global variable and save a reference in GpuBinaryHandle to be cleaned up
// in destructor on exit. Then associate all known kernels with the GPU binary
// handle so CUDA runtime can figure out what to call on the GPU side.
- std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary;
- if (!IsHIP) {
+ std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
+ if (!CudaGpuBinaryFileName.empty()) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
@@ -388,15 +388,23 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
ModuleIDSectionName = "__hip_module_id";
ModuleIDPrefix = "__hip_";
- // For HIP, create an external symbol __hip_fatbin in section .hip_fatbin.
- // The external symbol is supposed to contain the fat binary but will be
- // populated somewhere else, e.g. by lld through link script.
- FatBinStr = new llvm::GlobalVariable(
+ if (CudaGpuBinary) {
+ // If fatbin is available from early finalization, create a string
+ // literal containing the fat binary loaded from the given file.
+ FatBinStr = makeConstantString(CudaGpuBinary->getBuffer(), "",
+ FatbinConstantName, 8);
+ } else {
+ // If fatbin is not available, create an external symbol
+ // __hip_fatbin in section .hip_fatbin. The external symbol is supposed
+ // to contain the fat binary but will be populated somewhere else,
+ // e.g. by lld through a linker script.
+ FatBinStr = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty,
/*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
"__hip_fatbin", nullptr,
llvm::GlobalVariable::NotThreadLocal);
- cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
+ cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
+ }
FatMagic = HIPFatMagic;
} else {
@@ -447,6 +455,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// thread safety of the loaded program. Therefore we can assume sequential
// execution of constructor functions here.
if (IsHIP) {
+ auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
+ llvm::GlobalValue::LinkOnceAnyLinkage;
llvm::BasicBlock *IfBlock =
llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
llvm::BasicBlock *ExitBlock =
@@ -455,10 +465,13 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// of HIP ABI.
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::LinkOnceAnyLinkage,
+ Linkage,
/*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
"__hip_gpubin_handle");
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getQuantity());
+ // Prevent the weak symbol in different shared libraries from being merged.
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
Address GpuBinaryAddr(
GpuBinaryHandle,
CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
@@ -507,7 +520,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Generate a unique module ID.
SmallString<64> ModuleID;
llvm::raw_svector_ostream OS(ModuleID);
- OS << ModuleIDPrefix << llvm::format("%x", FatbinWrapper->getGUID());
+ OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
llvm::Constant *ModuleIDConstant =
makeConstantString(ModuleID.str(), "", ModuleIDSectionName, 32);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index d5945be43458..8b0733fbec3e 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -23,7 +23,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -276,7 +276,7 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc =
CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
- CGCallee Callee(GD.getDecl()->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 3b1b47cdfe07..ed168b1ce72d 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -132,7 +132,7 @@ void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
// generation. Maybe we can come up with a better way?
auto *ThisDecl = ImplicitParamDecl::Create(
CGM.getContext(), nullptr, MD->getLocation(),
- &CGM.getContext().Idents.get("this"), MD->getThisType(CGM.getContext()),
+ &CGM.getContext().Idents.get("this"), MD->getThisType(),
ImplicitParamDecl::CXXThis);
params.push_back(ThisDecl);
CGF.CXXABIThisDecl = ThisDecl;
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index fa51dc30c58b..7d494bb1f1c7 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -23,11 +23,11 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -59,6 +59,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86Pascal: return llvm::CallingConv::C;
// TODO: Add support for __vectorcall to LLVM.
case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
+ case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
@@ -67,11 +68,13 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
}
}
-/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification.
-/// FIXME: address space qualification?
-static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD) {
QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ if (MD)
+ RecTy = Context.getAddrSpaceQualType(RecTy, MD->getTypeQualifiers().getAddressSpace());
return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
@@ -214,6 +217,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (PcsAttr *PCS = D->getAttr<PcsAttr>())
return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+ if (D->hasAttr<AArch64VectorPcsAttr>())
+ return CC_AArch64VectorCall;
+
if (D->hasAttr<IntelOclBiccAttr>())
return CC_IntelOclBicc;
@@ -246,7 +252,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
// Add the 'this' pointer.
if (RD)
- argTypes.push_back(GetThisType(Context, RD));
+ argTypes.push_back(GetThisType(Context, RD, MD));
else
argTypes.push_back(Context.VoidPtrTy);
@@ -302,7 +308,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(GetThisType(Context, MD->getParent()));
+ argTypes.push_back(GetThisType(Context, MD->getParent(), MD));
bool PassParams = true;
@@ -529,7 +535,7 @@ const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
- CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent(), MD) };
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
@@ -543,7 +549,7 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CanQual<FunctionProtoType> FTP = GetFormalType(CD);
SmallVector<CanQualType, 2> ArgTys;
const CXXRecordDecl *RD = CD->getParent();
- ArgTys.push_back(GetThisType(Context, RD));
+ ArgTys.push_back(GetThisType(Context, RD, CD));
if (CT == Ctor_CopyingClosure)
ArgTys.push_back(*FTP->param_type_begin());
if (RD->getNumVBases() > 0)
@@ -741,8 +747,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
FunctionType::ExtInfo info,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs required) {
- assert(std::all_of(argTypes.begin(), argTypes.end(),
- [](CanQualType T) { return T.isCanonicalAsParam(); }));
+ assert(llvm::all_of(argTypes,
+ [](CanQualType T) { return T.isCanonicalAsParam(); }));
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
@@ -1253,8 +1259,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// Otherwise do coercion through memory. This is stupid, but simple.
Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
- Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
- Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.AllocaInt8PtrTy);
+ Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
+ Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(Casted, SrcCasted,
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
false);
@@ -1335,8 +1341,8 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
CGF.Builder.CreateStore(Src, Tmp);
- Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.AllocaInt8PtrTy);
- Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.AllocaInt8PtrTy);
+ Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
+ Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DstCasted, Casted,
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
false);
@@ -1709,6 +1715,8 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
+ if (CodeGenOpts.IndirectTlsSegRefs)
+ FuncAttrs.addAttribute("indirect-tls-seg-refs");
if (CodeGenOpts.NoImplicitFloat)
FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
@@ -1784,6 +1792,11 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
FuncAttrs.addAttribute("stackrealign");
if (CodeGenOpts.Backchain)
FuncAttrs.addAttribute("backchain");
+
+ // FIXME: The interaction of this attribute with the SLH command line flag
+ // has not been determined.
+ if (CodeGenOpts.SpeculativeLoadHardening)
+ FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
}
if (getLangOpts().assumeFunctionsAreConvergent()) {
@@ -1803,6 +1816,12 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
if (CodeGenOpts.FlushDenorm)
FuncAttrs.addAttribute("nvptx-f32ftz", "true");
}
+
+ for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
+ StringRef Var, Value;
+ std::tie(Var, Value) = Attr.split('=');
+ FuncAttrs.addAttribute(Var, Value);
+ }
}
void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
@@ -1828,7 +1847,7 @@ void CodeGenModule::ConstructAttributeList(
AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
CalleeInfo.getCalleeFunctionProtoType());
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
bool HasOptnone = false;
// FIXME: handle sseregparm someday...
@@ -1845,6 +1864,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (TargetDecl->hasAttr<ConvergentAttr>())
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+ if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1936,7 +1957,7 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("disable-tail-calls",
llvm::toStringRef(DisableTailCalls));
- GetCPUAndFeaturesAttributes(TargetDecl, FuncAttrs);
+ GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
}
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
@@ -2327,7 +2348,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
// Load scalar value from indirect argument.
llvm::Value *V =
- EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
+ EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
@@ -2389,7 +2410,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (!AVAttr)
if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
- if (AVAttr) {
+ if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
+ // If the alignment-assumption sanitizer is enabled, we do *not* add the
+ // alignment attribute here, but emit a normal alignment assumption
+ // instead, so the UBSAN check can function.
llvm::Value *AlignmentValue =
EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI =
@@ -2490,7 +2514,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Match to what EmitParmDecl is expecting for this type.
if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
llvm::Value *V =
- EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
+ EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
ArgVals.push_back(ParamValue::forDirect(V));
@@ -3063,8 +3087,9 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
QualType type = param->getType();
- assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
- "cannot emit delegate call arguments for inalloca arguments!");
+ if (isInAllocaArgument(CGM.getCXXABI(), type)) {
+ CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
+ }
// GetAddrOfLocalVar returns a pointer-to-pointer for references,
// but the argument needs to be the original pointer.
@@ -3945,15 +3970,28 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else if (I->hasLValue()) {
auto LV = I->getKnownLValue();
auto AS = LV.getAddressSpace();
+
if ((!ArgInfo.getIndirectByVal() &&
(LV.getAlignment() >=
- getContext().getTypeAlignInChars(I->Ty))) ||
- (ArgInfo.getIndirectByVal() &&
- ((AS != LangAS::Default && AS != LangAS::opencl_private &&
- AS != CGM.getASTAllocaAddressSpace())))) {
+ getContext().getTypeAlignInChars(I->Ty)))) {
+ NeedCopy = true;
+ }
+ if (!getLangOpts().OpenCL) {
+ if ((ArgInfo.getIndirectByVal() &&
+ (AS != LangAS::Default &&
+ AS != CGM.getASTAllocaAddressSpace()))) {
+ NeedCopy = true;
+ }
+ }
+ // For OpenCL, even if RV is located in the default or alloca address
+ // space, we don't want to perform an address space cast for it.
+ else if ((ArgInfo.getIndirectByVal() &&
+ Addr.getType()->getAddressSpace() != IRFuncTy->
+ getParamType(FirstIRArg)->getPointerAddressSpace())) {
NeedCopy = true;
}
}
+
if (NeedCopy) {
// Create an aligned temporary, and copy to it.
Address AI = CreateMemTempWithoutCast(
@@ -4235,6 +4273,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
#endif
+ // Update the largest vector width if any arguments have vector types.
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+ if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+ }
+
// Compute the calling convention and attributes.
unsigned CallingConv;
llvm::AttributeList Attrs;
@@ -4248,8 +4293,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !(Callee.getAbstractInfo().getCalleeDecl() &&
- Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
+ !(Callee.getAbstractInfo().getCalleeDecl().getDecl() &&
+ Callee.getAbstractInfo()
+ .getCalleeDecl()
+ .getDecl()
+ ->hasAttr<NoInlineAttr>())) {
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::AlwaysInline);
@@ -4315,6 +4363,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!CI->getType()->isVoidTy())
CI->setName("call");
+ // Update largest vector width from the return type.
+ if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
@@ -4329,7 +4382,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Suppress tail calls if requested.
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
}
@@ -4476,7 +4529,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} ();
// Emit the assume_aligned check on the return value.
- const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
@@ -4485,13 +4538,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
- EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
- OffsetValue);
+ EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
+ AlignmentCI->getZExtValue(), OffsetValue);
} else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
- llvm::Value *ParamVal =
- CallArgs[AA->getParamIndex().getLLVMIndex()].getRValue(
- *this).getScalarVal();
- EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
+ llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
+ .getRValue(*this)
+ .getScalarVal();
+ EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
+ AlignmentVal);
}
}
@@ -4502,8 +4556,8 @@ CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
if (isVirtual()) {
const CallExpr *CE = getVirtualCallExpr();
return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
- CGF, getVirtualMethodDecl(), getThisAddress(),
- getFunctionType(), CE ? CE->getLocStart() : SourceLocation());
+ CGF, getVirtualMethodDecl(), getThisAddress(), getFunctionType(),
+ CE ? CE->getBeginLoc() : SourceLocation());
}
return *this;
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 99a36e4e12f1..c300808bea28 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -46,21 +46,21 @@ class CGCalleeInfo {
/// The function prototype of the callee.
const FunctionProtoType *CalleeProtoTy;
/// The function declaration of the callee.
- const Decl *CalleeDecl;
+ GlobalDecl CalleeDecl;
public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
- CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
- CGCalleeInfo(const Decl *calleeDecl)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl() {}
+ CGCalleeInfo(GlobalDecl calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
const FunctionProtoType *getCalleeFunctionProtoType() const {
return CalleeProtoTy;
}
- const Decl *getCalleeDecl() const { return CalleeDecl; }
+ const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};
/// All available information about a concrete callee.
@@ -171,7 +171,7 @@ public:
}
CGCalleeInfo getAbstractInfo() const {
if (isVirtual())
- return VirtualInfo.MD.getDecl();
+ return VirtualInfo.MD;
assert(isOrdinary());
return AbstractInfo;
}
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index ec4eb000a3b9..ee150a792b76 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -16,14 +16,15 @@
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -829,7 +830,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
CGM.getTarget().getCXXABI().hasConstructorVariants()) {
- EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc());
return;
}
@@ -2012,8 +2013,19 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
bool NewPointerIsChecked) {
CallArgList Args;
+ LangAS SlotAS = E->getType().getAddressSpace();
+ QualType ThisType = D->getThisType();
+ LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace();
+ llvm::Value *ThisPtr = This.getPointer();
+ if (SlotAS != ThisAS) {
+ unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
+ llvm::Type *NewType =
+ ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
+ ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
+ ThisAS, SlotAS, NewType);
+ }
// Push the this ptr.
- Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
+ Args.add(RValue::get(ThisPtr), D->getThisType());
// If this is a trivial constructor, emit a memcpy now before we lose
// the alignment information on the argument.
@@ -2122,7 +2134,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, D);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
EmitCall(Info, Callee, ReturnValueSlot(), Args);
// Generate vtable assumptions if we're constructing a complete object
@@ -2147,7 +2159,7 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
CallArgList Args;
- CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType(getContext()));
+ CallArg ThisArg(RValue::get(This.getPointer()), D->getThisType());
// Forward the parameters.
if (InheritedFromVBase &&
@@ -2196,6 +2208,7 @@ void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
GlobalDecl GD(Ctor, CtorType);
InlinedInheritingConstructorScope Scope(*this, GD);
ApplyInlineDebugLocation DebugScope(*this, GD);
+ RunCleanupsScope RunCleanups(*this);
// Save the arguments to be passed to the inherited constructor.
CXXInheritedCtorInitExprArgs = Args;
@@ -2271,7 +2284,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
+ Args.add(RValue::get(This.getPointer()), D->getThisType());
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
@@ -2808,7 +2821,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// variadic arguments.
// Now emit our call.
- auto callee = CGCallee::forDirect(calleePtr, callOperator);
+ auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator));
RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
@@ -2839,12 +2852,12 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
CallArgList CallArgs;
QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
- Address ThisPtr = GetAddrOfBlockDecl(variable, false);
+ Address ThisPtr = GetAddrOfBlockDecl(variable);
CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
for (auto param : BD->parameters())
- EmitDelegateCallArg(CallArgs, param, param->getLocStart());
+ EmitDelegateCallArg(CallArgs, param, param->getBeginLoc());
assert(!Lambda->isGenericLambda() &&
"generic lambda interconversion to block not implemented");
@@ -2863,7 +2876,7 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
// Add the rest of the parameters.
for (auto Param : MD->parameters())
- EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());
+ EmitDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
// For a generic lambda, find the corresponding call operator specialization
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 0a766d176200..3743d24f11fc 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -366,7 +366,7 @@ static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
llvm::BasicBlock *Block) {
// If it's a branch, turn it into a switch whose default
// destination is its original target.
- llvm::TerminatorInst *Term = Block->getTerminator();
+ llvm::Instruction *Term = Block->getTerminator();
assert(Term && "can't transition block without terminator");
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
@@ -589,7 +589,7 @@ static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
llvm::BasicBlock *To) {
// Exit is the exit block of a cleanup, so it always terminates in
// an unconditional branch or a switch.
- llvm::TerminatorInst *Term = Exit->getTerminator();
+ llvm::Instruction *Term = Exit->getTerminator();
if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
index 4f525c8aac85..80fa7c873631 100644
--- a/lib/CodeGen/CGCoroutine.cpp
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -93,10 +93,10 @@ static void createCoroData(CodeGenFunction &CGF,
CallExpr const *CoroIdExpr = nullptr) {
if (CurCoro.Data) {
if (CurCoro.Data->CoroIdExpr)
- CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ CGF.CGM.Error(CoroIdExpr->getBeginLoc(),
"only one __builtin_coro_id can be used in a function");
else if (CoroIdExpr)
- CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ CGF.CGM.Error(CoroIdExpr->getBeginLoc(),
"__builtin_coro_id shall not be used in a C++ coroutine");
else
llvm_unreachable("EmitCoroutineBodyStatement called twice?");
@@ -444,7 +444,7 @@ struct CallCoroDelete final : public EHScopeStack::Cleanup {
// We should have captured coro.free from the emission of deallocate.
auto *CoroFree = CGF.CurCoro.Data->LastCoroFree;
if (!CoroFree) {
- CGF.CGM.Error(Deallocate->getLocStart(),
+ CGF.CGM.Error(Deallocate->getBeginLoc(),
"Deallocation expressoin does not refer to coro.free");
return;
}
@@ -654,7 +654,7 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
EmitBlock(BodyBB);
}
- auto Loc = S.getLocStart();
+ auto Loc = S.getBeginLoc();
CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr,
CurCoro.Data->ExceptionHandler);
auto *TryStmt =
@@ -707,8 +707,8 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
if (CurCoro.Data && CurCoro.Data->CoroBegin) {
return RValue::get(CurCoro.Data->CoroBegin);
}
- CGM.Error(E->getLocStart(), "this builtin expect that __builtin_coro_begin "
- "has been used earlier in this function");
+ CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_begin "
+ "has been used earlier in this function");
auto NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
return RValue::get(NullPtr);
}
@@ -722,7 +722,7 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
Args.push_back(CurCoro.Data->CoroId);
break;
}
- CGM.Error(E->getLocStart(), "this builtin expect that __builtin_coro_id has"
+ CGM.Error(E->getBeginLoc(), "this builtin expects that __builtin_coro_id has"
" been used earlier in this function");
// Fallthrough to the next case to add TokenNone as the first argument.
LLVM_FALLTHROUGH;
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 5be6fb3e4245..41f8721468a3 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -25,10 +25,10 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
@@ -41,6 +41,7 @@
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
@@ -180,8 +181,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
SourceManager &SM = CGM.getContext().getSourceManager();
auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
-
- if (PCLoc.isInvalid() || Scope->getFilename() == PCLoc.getFilename())
+ if (PCLoc.isInvalid() || Scope->getFile() == getOrCreateFile(CurLoc))
return;
if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) {
@@ -220,7 +220,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- getOrCreateMainFile());
+ TheCU->getFile());
return Default;
}
@@ -234,6 +234,9 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
if (CGM.getCodeGenOpts().EmitCodeView)
PP.MSVCFormatting = true;
+ // Apply -fdebug-prefix-map.
+ PP.RemapFilePaths = true;
+ PP.remapPath = [this](StringRef Path) { return remapDIPath(Path); };
return PP;
}
@@ -401,19 +404,18 @@ Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
- return getOrCreateMainFile();
+ return TheCU->getFile();
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
+ StringRef FileName = PLoc.getFilename();
+ if (PLoc.isInvalid() || FileName.empty())
// If the location is not valid then use main input file.
- return getOrCreateMainFile();
+ return TheCU->getFile();
// Cache the results.
- const char *fname = PLoc.getFilename();
- auto It = DIFileCache.find(fname);
-
+ auto It = DIFileCache.find(FileName.data());
if (It != DIFileCache.end()) {
// Verify that the information still exists.
if (llvm::Metadata *V = It->second)
@@ -426,22 +428,48 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
-
- llvm::DIFile *F = DBuilder.createFile(
- remapDIPath(PLoc.getFilename()), remapDIPath(getCurrentDirname()), CSInfo,
- getSource(SM, SM.getFileID(Loc)));
-
- DIFileCache[fname].reset(F);
+ return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
+}
+
+llvm::DIFile *
+CGDebugInfo::createFile(StringRef FileName,
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ Optional<StringRef> Source) {
+ StringRef Dir;
+ StringRef File;
+ std::string RemappedFile = remapDIPath(FileName);
+ std::string CurDir = remapDIPath(getCurrentDirname());
+ SmallString<128> DirBuf;
+ SmallString<128> FileBuf;
+ if (llvm::sys::path::is_absolute(RemappedFile)) {
+ // Strip the common prefix (if it is more than just "/") from current
+ // directory and FileName for a more space-efficient encoding.
+ auto FileIt = llvm::sys::path::begin(RemappedFile);
+ auto FileE = llvm::sys::path::end(RemappedFile);
+ auto CurDirIt = llvm::sys::path::begin(CurDir);
+ auto CurDirE = llvm::sys::path::end(CurDir);
+ for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
+ llvm::sys::path::append(DirBuf, *CurDirIt);
+ if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
+ // The common prefix is only the root; stripping it would cause
+ // LLVM diagnostic locations to be more confusing.
+ Dir = {};
+ File = RemappedFile;
+ } else {
+ for (; FileIt != FileE; ++FileIt)
+ llvm::sys::path::append(FileBuf, *FileIt);
+ Dir = DirBuf;
+ File = FileBuf;
+ }
+ } else {
+ Dir = CurDir;
+ File = RemappedFile;
+ }
+ llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source);
+ DIFileCache[FileName.data()].reset(F);
return F;
}
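
For intuition, a minimal standalone sketch of the common-prefix splitting performed in createFile above (assumptions: LLVM's Support library is available, and splitOnCommonPrefix, its name, and the sample paths are invented for illustration; this is not the patch's code):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Path.h"
    #include <cstdio>
    #include <string>
    #include <utility>

    // Split an absolute FileName into (Dir, File) by dropping the prefix it
    // shares with CurDir, mirroring the scheme in CGDebugInfo::createFile.
    static std::pair<std::string, std::string>
    splitOnCommonPrefix(llvm::StringRef FileName, llvm::StringRef CurDir) {
      llvm::SmallString<128> DirBuf, FileBuf;
      auto FI = llvm::sys::path::begin(FileName), FE = llvm::sys::path::end(FileName);
      auto CI = llvm::sys::path::begin(CurDir), CE = llvm::sys::path::end(CurDir);
      for (; CI != CE && FI != FE && *CI == *FI; ++CI, ++FI)
        llvm::sys::path::append(DirBuf, *CI);        // accumulate the shared prefix
      if (std::distance(llvm::sys::path::begin(CurDir), CI) <= 1)
        return {"", FileName.str()};                 // shared only "/" (or nothing)
      for (; FI != FE; ++FI)
        llvm::sys::path::append(FileBuf, *FI);       // remainder becomes the file part
      return {DirBuf.str().str(), FileBuf.str().str()};
    }

    int main() {
      auto R = splitOnCommonPrefix("/home/me/src/foo.c", "/home/me/build");
      std::printf("Dir=%s File=%s\n", R.first.c_str(), R.second.c_str());
      // Prints: Dir=/home/me File=src/foo.c
    }

The split keeps the directory components already shared with the compilation directory out of each per-file name, which is the space saving the comment above refers to.
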
-llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
- return DBuilder.createFile(
- remapDIPath(TheCU->getFilename()), remapDIPath(TheCU->getDirectory()),
- TheCU->getFile()->getChecksum(),
- CGM.getCodeGenOpts().EmbedSource ? TheCU->getSource() : None);
-}
-
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
for (const auto &Entry : DebugPrefixMap)
if (Path.startswith(Entry.first))
@@ -527,11 +555,11 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::dwarf::SourceLanguage LangTag;
const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
- if (LO.ObjC1)
+ if (LO.ObjC)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
else
LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
- } else if (LO.ObjC1) {
+ } else if (LO.ObjC) {
LangTag = llvm::dwarf::DW_LANG_ObjC;
} else if (LO.RenderScript) {
LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript;
@@ -545,7 +573,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Figure out which version of the ObjC runtime we have.
unsigned RuntimeVers = 0;
- if (LO.ObjC1)
+ if (LO.ObjC)
RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
llvm::DICompileUnit::DebugEmissionKind EmissionKind;
@@ -557,29 +585,42 @@ void CGDebugInfo::CreateCompileUnit() {
case codegenoptions::DebugLineTablesOnly:
EmissionKind = llvm::DICompileUnit::LineTablesOnly;
break;
+ case codegenoptions::DebugDirectivesOnly:
+ EmissionKind = llvm::DICompileUnit::DebugDirectivesOnly;
+ break;
case codegenoptions::LimitedDebugInfo:
case codegenoptions::FullDebugInfo:
EmissionKind = llvm::DICompileUnit::FullDebug;
break;
}
+ uint64_t DwoId = 0;
+ auto &CGOpts = CGM.getCodeGenOpts();
+ // The DIFile used by the CU is distinct from the main source
+ // file. Its directory part specifies what becomes the
+ // DW_AT_comp_dir (the compilation directory), even if the source
+ // file was specified with an absolute path.
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
+ llvm::DIFile *CUFile = DBuilder.createFile(
+ remapDIPath(MainFileName), remapDIPath(getCurrentDirname()), CSInfo,
+ getSource(SM, SM.getMainFileID()));
// Create new compile unit.
- // FIXME - Eliminate TheCU.
- auto &CGOpts = CGM.getCodeGenOpts();
TheCU = DBuilder.createCompileUnit(
- LangTag,
- DBuilder.createFile(remapDIPath(MainFileName),
- remapDIPath(getCurrentDirname()), CSInfo,
- getSource(SM, SM.getMainFileID())),
- CGOpts.EmitVersionIdentMetadata ? Producer : "",
+ LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
CGOpts.DwarfDebugFlags, RuntimeVers,
- CGOpts.EnableSplitDwarf ? "" : CGOpts.SplitDwarfFile, EmissionKind,
- 0 /* DWOid */, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
- CGOpts.GnuPubnames);
+ (CGOpts.getSplitDwarfMode() != CodeGenOptions::NoFission)
+ ? ""
+ : CGOpts.SplitDwarfFile,
+ EmissionKind, DwoId, CGOpts.SplitDwarfInlining,
+ CGOpts.DebugInfoForProfiling,
+ CGM.getTarget().getTriple().isNVPTX()
+ ? llvm::DICompileUnit::DebugNameTableKind::None
+ : static_cast<llvm::DICompileUnit::DebugNameTableKind>(
+ CGOpts.DebugNameTable),
+ CGOpts.DebugRangesBaseAddress);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -597,9 +638,9 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return nullptr;
case BuiltinType::ObjCClass:
if (!ClassTy)
- ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ ClassTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU, TheCU->getFile(), 0);
return ClassTy;
case BuiltinType::ObjCId: {
// typedef struct objc_class *Class;
@@ -611,21 +652,21 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return ObjTy;
if (!ClassTy)
- ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- "objc_class", TheCU,
- getOrCreateMainFile(), 0);
+ ClassTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", TheCU, TheCU->getFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
- ObjTy = DBuilder.createStructType(
- TheCU, "objc_object", getOrCreateMainFile(), 0, 0, 0,
- llvm::DINode::FlagZero, nullptr, llvm::DINodeArray());
+ ObjTy = DBuilder.createStructType(TheCU, "objc_object", TheCU->getFile(), 0,
+ 0, 0, llvm::DINode::FlagZero, nullptr,
+ llvm::DINodeArray());
DBuilder.replaceArrays(
ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0,
+ ObjTy, "isa", TheCU->getFile(), 0, Size, 0, 0,
llvm::DINode::FlagZero, ISATy)));
return ObjTy;
}
@@ -633,7 +674,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
if (!SelTy)
SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_selector", TheCU,
- getOrCreateMainFile(), 0);
+ TheCU->getFile(), 0);
return SelTy;
}
@@ -652,6 +693,10 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
case BuiltinType::OCLReserveID:
return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return getOrCreateStructPtrType("opencl_" #ExtType, Id##Ty);
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::UChar:
case BuiltinType::Char_U:
@@ -825,31 +870,45 @@ static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) {
}
}
-// Determines if the tag declaration will require a type identifier.
+// Determines if the debug info for this tag declaration needs a type
+// identifier. The purpose of the unique identifier is to deduplicate type
+// information for identical types across TUs. Because of the C++ one definition
+// rule (ODR), it is valid to assume that the type is defined the same way in
+// every TU and its debug info is equivalent.
+//
+// C does not have the ODR, and it is common for codebases to contain multiple
+// different definitions of a struct with the same name in different TUs.
+// Therefore, if the type doesn't have a C++ mangling, don't give it an
+// identifier. Type information in C is smaller and simpler than C++ type
+// information, so the increase in debug info size is negligible.
+//
+// If the type is not externally visible, it should be unique to the current TU,
+// and should not need an identifier to participate in type deduplication.
+// However, when emitting CodeView, the format internally uses these
+// unique type name identifiers for references between debug info. For example,
+// the method of a class in an anonymous namespace uses the identifier to refer
+// to its parent class. The Microsoft C++ ABI attempts to provide unique names
+// for such types, so when emitting CodeView, always use identifiers for C++
+// types. This may create problems when attempting to emit CodeView when the MS
+// C++ ABI is not in use.
static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
llvm::DICompileUnit *TheCU) {
// We only add a type identifier for types with C++ name mangling.
if (!hasCXXMangling(TD, TheCU))
return false;
- // CodeView types with C++ mangling need a type identifier.
- if (CGM.getCodeGenOpts().EmitCodeView)
- return true;
-
// Externally visible types with C++ mangling need a type identifier.
if (TD->isExternallyVisible())
return true;
+ // CodeView types with C++ mangling need a type identifier.
+ if (CGM.getCodeGenOpts().EmitCodeView)
+ return true;
+
return false;
}
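
To make the policy concrete, a small hedged example (declarations invented for illustration; they do not come from the patch):

    // Outcome of the rules above for a C++ translation unit:
    struct Visible { int x; };               // externally visible -> gets an identifier
    namespace { struct Hidden { int x; }; }  // internal linkage -> identifier only when
                                             // emitting CodeView
    // In a C translation unit (no C++ mangling) no tag type gets an identifier.
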
-// When emitting CodeView debug information we need to produce a type
-// identifier for all types which have a C++ mangling. Until a GUID is added
-// to the identifier (not currently implemented) the result will not be unique
-// across compilation units.
-// When emitting DWARF debug information, we need to produce a type identifier
-// for all externally visible types with C++ name mangling. This identifier
-// should be unique across ODR-compliant compilation units.
+// Returns a unique type identifier string if one exists, or an empty string.
static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
llvm::DICompileUnit *TheCU) {
SmallString<256> Identifier;
@@ -936,18 +995,53 @@ llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
if (Cache)
return Cache;
Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name,
- TheCU, getOrCreateMainFile(), 0);
+ TheCU, TheCU->getFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
Cache = DBuilder.createPointerType(Cache, Size);
return Cache;
}
+uint64_t CGDebugInfo::collectDefaultElementTypesForBlockPointer(
+ const BlockPointerType *Ty, llvm::DIFile *Unit, llvm::DIDerivedType *DescTy,
+ unsigned LineNo, SmallVectorImpl<llvm::Metadata *> &EltTys) {
+ QualType FType;
+
+ // Advanced by calls to CreateMemberType in increments of FType, then
+ // returned as the overall size of the default elements.
+ uint64_t FieldOffset = 0;
+
+ // Blocks in OpenCL have unique constraints which make the standard fields
+ // redundant while requiring size and align fields for enqueue_kernel. See
+ // initializeForBlockHeader in CGBlocks.cpp
+ if (CGM.getLangOpts().OpenCL) {
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__align", &FieldOffset));
+ } else {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(Ty->getPointeeType());
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(Ty);
+ uint32_t FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ EltTys.push_back(DBuilder.createMemberType(
+ Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign,
+ FieldOffset, llvm::DINode::FlagZero, DescTy));
+ FieldOffset += FieldSize;
+ }
+
+ return FieldOffset;
+}
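
For reference, a hedged sketch of the layout these default elements describe; the field names follow the code above, but the struct itself is illustrative and assumes the conventional Apple blocks ABI rather than anything defined in this patch:

    // Illustrative only: the in-memory layout the non-OpenCL fields describe.
    struct BlockDescriptor;                     // size, copy/dispose helpers, signature
    struct BlockLiteral {
      void *__isa;                              // runtime class pointer
      int __flags;                              // copy/dispose and signature flags
      int __reserved;
      void (*__FuncPtr)(struct BlockLiteral *); // the block's invoke function
      struct BlockDescriptor *__descriptor;
    };
    // In the OpenCL case only `int __size; int __align;` are emitted instead.
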
+
llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
llvm::DIFile *Unit) {
SmallVector<llvm::Metadata *, 8> EltTys;
QualType FType;
- uint64_t FieldSize, FieldOffset;
- uint32_t FieldAlign;
+ uint64_t FieldOffset;
llvm::DINodeArray Elements;
FieldOffset = 0;
@@ -959,10 +1053,9 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.clear();
llvm::DINode::DIFlags Flags = llvm::DINode::FlagAppleBlock;
- unsigned LineNo = 0;
auto *EltTy =
- DBuilder.createStructType(Unit, "__block_descriptor", nullptr, LineNo,
+ DBuilder.createStructType(Unit, "__block_descriptor", nullptr, 0,
FieldOffset, 0, Flags, nullptr, Elements);
// Bit size, align and offset of the type.
@@ -970,27 +1063,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
auto *DescTy = DBuilder.createPointerType(EltTy, Size);
- FieldOffset = 0;
- if (CGM.getLangOpts().OpenCL) {
- FType = CGM.getContext().IntTy;
- EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
- EltTys.push_back(CreateMemberType(Unit, FType, "__align", &FieldOffset));
- } else {
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
- FType = CGM.getContext().IntTy;
- EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
- EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
- FType = CGM.getContext().getPointerType(Ty->getPointeeType());
- EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- FieldSize = CGM.getContext().getTypeSize(Ty);
- FieldAlign = CGM.getContext().getTypeAlign(Ty);
- EltTys.push_back(DBuilder.createMemberType(
- Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign, FieldOffset,
- llvm::DINode::FlagZero, DescTy));
- FieldOffset += FieldSize;
- }
+ FieldOffset = collectDefaultElementTypesForBlockPointer(Ty, Unit, DescTy,
+ 0, EltTys);
Elements = DBuilder.getOrCreateArray(EltTys);
@@ -998,7 +1072,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
// DW_AT_APPLE_BLOCK attribute and are an implementation detail only
// the debugger needs to know about. To allow type uniquing, emit
// them without a name or a location.
- EltTy = DBuilder.createStructType(Unit, "", nullptr, LineNo, FieldOffset, 0,
+ EltTy = DBuilder.createStructType(Unit, "", nullptr, 0, FieldOffset, 0,
Flags, nullptr, Elements);
return DBuilder.createPointerType(EltTy, Size);
@@ -1058,6 +1132,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_X86_64SysV:
return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
case CC_AAPCS:
+ case CC_AArch64VectorCall:
return llvm::dwarf::DW_CC_LLVM_AAPCS;
case CC_AAPCS_VFP:
return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
@@ -1353,8 +1428,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
if (Method->isStatic())
return cast_or_null<llvm::DISubroutineType>(
getOrCreateType(QualType(Func, 0), Unit));
- return getOrCreateInstanceMethodType(Method->getThisType(CGM.getContext()),
- Func, Unit);
+ return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit);
}
llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
@@ -1450,16 +1524,16 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// Collect virtual method info.
llvm::DIType *ContainingType = nullptr;
- unsigned Virtuality = 0;
unsigned VIndex = 0;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
int ThisAdjustment = 0;
if (Method->isVirtual()) {
if (Method->isPure())
- Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ SPFlags |= llvm::DISubprogram::SPFlagPureVirtual;
else
- Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+ SPFlags |= llvm::DISubprogram::SPFlagVirtual;
if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
// It doesn't make sense to give a virtual destructor a vtable index,
@@ -1511,12 +1585,13 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
Flags |= llvm::DINode::FlagLValueReference;
if (Method->getRefQualifier() == RQ_RValue)
Flags |= llvm::DINode::FlagRValueReference;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram *SP = DBuilder.createMethod(
RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false, /*isDefinition=*/false, Virtuality,
- VIndex, ThisAdjustment, ContainingType, Flags, CGM.getLangOpts().Optimize,
+ MethodTy, VIndex, ThisAdjustment, ContainingType, Flags, SPFlags,
TParamsArray.get());
SPCache[Method->getCanonicalDecl()].reset(SP);
@@ -1741,6 +1816,29 @@ CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
return llvm::DINodeArray();
}
+llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
+ llvm::DIFile *Unit) {
+ if (auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL)) {
+ auto T = TS->getSpecializedTemplateOrPartial();
+ auto TA = TS->getTemplateArgs().asArray();
+ // Collect parameters for a partial specialization
+ if (T.is<VarTemplatePartialSpecializationDecl *>()) {
+ const TemplateParameterList *TList =
+ T.get<VarTemplatePartialSpecializationDecl *>()
+ ->getTemplateParameters();
+ return CollectTemplateParams(TList, TA, Unit);
+ }
+
+ // Collect parameters for an explicit specialization
+ if (T.is<VarTemplateDecl *>()) {
+ const TemplateParameterList *TList = T.get<VarTemplateDecl *>()
+ ->getTemplateParameters();
+ return CollectTemplateParams(TList, TA, Unit);
+ }
+ }
+ return llvm::DINodeArray();
+}
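
A short example of the declarations this routine walks (invented C++14 source, not from the patch); the partial specialization takes the first branch above and the explicit specialization the second:

    // C++14 variable templates; debug info for the specializations below now
    // records their template arguments (<T *> and <int> respectively).
    template <typename T> constexpr T zero = T();            // primary template
    template <typename T> constexpr T *zero<T *> = nullptr;  // partial specialization
    template <> constexpr int zero<int> = 0;                  // explicit specialization
    int main() { return zero<int> + (zero<float *> == nullptr ? 0 : 1); }
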
+
llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
// Always get the full list of parameters, not just the ones from
@@ -1896,8 +1994,17 @@ static bool isDefinedInClangModule(const RecordDecl *RD) {
if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
if (!CXXDecl->isCompleteDefinition())
return false;
+ // Check whether RD is a template.
auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
if (TemplateKind != TSK_Undeclared) {
+ // Unfortunately getOwningModule() isn't accurate enough to find the
+ // owning module of a ClassTemplateSpecializationDecl that is inside a
+ // namespace spanning multiple modules.
+ bool Explicit = false;
+ if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(CXXDecl))
+ Explicit = TD->isExplicitInstantiationOrSpecialization();
+ if (!Explicit && CXXDecl->getEnclosingNamespaceContext())
+ return false;
// This is a template, check the origin of the first member.
if (CXXDecl->field_begin() == CXXDecl->field_end())
return TemplateKind == TSK_ExplicitInstantiationDeclaration;
@@ -2445,9 +2552,9 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
Count = CAT->getSize().getZExtValue();
else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
if (Expr *Size = VAT->getSizeExpr()) {
- llvm::APSInt V;
- if (Size->EvaluateAsInt(V, CGM.getContext()))
- Count = V.getExtValue();
+ Expr::EvalResult Result;
+ if (Size->EvaluateAsInt(Result, CGM.getContext()))
+ Count = Result.Val.getInt().getExtValue();
}
}
@@ -2513,9 +2620,9 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
const FunctionProtoType *FPT =
Ty->getPointeeType()->getAs<FunctionProtoType>();
return DBuilder.createMemberPointerType(
- getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType(
- Ty->getClass(), FPT->getTypeQuals())),
- FPT, U),
+ getOrCreateInstanceMethodType(
+ CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
+ FPT, U),
ClassType, Size, /*Align=*/0, Flags);
}
@@ -2603,7 +2710,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit);
return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
Line, Size, Align, EltArray, ClassTy,
- Identifier, ED->isFixed());
+ Identifier, ED->isScoped());
}
llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent,
@@ -3035,6 +3142,7 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
unsigned &LineNo, QualType &T,
StringRef &Name, StringRef &LinkageName,
+ llvm::MDTuple *&TemplateParameters,
llvm::DIScope *&VDContext) {
Unit = getOrCreateFile(VD->getLocation());
LineNo = getLineNumber(VD->getLocation());
@@ -3058,6 +3166,13 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
if (LinkageName == Name)
LinkageName = StringRef();
+ if (isa<VarTemplateSpecializationDecl>(VD)) {
+ llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VD, &*Unit);
+ TemplateParameters = parameterNodes.get();
+ } else {
+ TemplateParameters = nullptr;
+ }
+
// Since we emit declarations (DW_AT_members) for static members, place the
// definition of those static members in the namespace they were declared in
// in the source code (the lexical decl context).
@@ -3084,6 +3199,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
llvm::DINodeArray TParamsArray;
StringRef Name, LinkageName;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
SourceLocation Loc = GD.getDecl()->getLocation();
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
@@ -3100,20 +3216,23 @@ llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
QualType FnType = CGM.getContext().getFunctionType(
FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
+ if (!FD->isExternallyVisible())
+ SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
+
if (Stub) {
+ Flags |= getCallSiteRelatedAttrs();
+ SPFlags |= llvm::DISubprogram::SPFlagDefinition;
return DBuilder.createFunction(
DContext, Name, LinkageName, Unit, Line,
- getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
- !FD->isExternallyVisible(),
- /* isDefinition = */ true, 0, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(FD));
}
llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
DContext, Name, LinkageName, Unit, Line,
- getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
- !FD->isExternallyVisible(),
- /* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit), 0, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(FD));
const FunctionDecl *CanonDecl = FD->getCanonicalDecl();
FwdDeclReplaceMap.emplace_back(std::piecewise_construct,
@@ -3138,12 +3257,14 @@ CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
unsigned Line = getLineNumber(Loc);
+ llvm::MDTuple *TemplateParameters = nullptr;
- collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, TemplateParameters,
+ DContext);
auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
- !VD->isExternallyVisible(), nullptr, Align);
+ !VD->isExternallyVisible(), nullptr, TemplateParameters, Align);
FwdDeclReplaceMap.emplace_back(
std::piecewise_construct,
std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
@@ -3299,6 +3420,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
bool HasDecl = (D != nullptr);
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = Unit;
llvm::DINodeArray TParamsArray;
@@ -3338,6 +3460,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (CurFuncIsThunk)
Flags |= llvm::DINode::FlagThunk;
+ if (Fn->hasLocalLinkage())
+ SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
+
+ llvm::DINode::DIFlags FlagsForDef = Flags | getCallSiteRelatedAttrs();
+ llvm::DISubprogram::DISPFlags SPFlagsForDef =
+ SPFlags | llvm::DISubprogram::SPFlagDefinition;
+
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
@@ -3348,9 +3479,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
// are emitted as CU level entities by the backend.
llvm::DISubprogram *SP = DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), Fn->hasLocalLinkage(),
- true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
- TParamsArray.get(), getFunctionDeclaration(D));
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, FlagsForDef,
+ SPFlagsForDef, TParamsArray.get(), getFunctionDeclaration(D));
Fn->setSubprogram(SP);
// We might get here with a VarDecl in the case we're generating
// code for the initialization of globals. Do not record these decls
@@ -3370,8 +3500,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
cast<llvm::DICompositeType>(It->second);
llvm::DISubprogram *FD = DBuilder.createFunction(
InterfaceDecl, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), Fn->hasLocalLinkage(),
- false /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
TParamsArray.get());
DBuilder.finalizeSubprogram(FD);
ObjCMethodCache[ID].push_back(FD);
@@ -3420,11 +3549,13 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
}
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = 0;
+ llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
+ if (CGM.getLangOpts().Optimize)
+ SPFlags |= llvm::DISubprogram::SPFlagOptimized;
DBuilder.retainType(DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), false /*internalLinkage*/,
- false /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize,
+ getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
TParamsArray.get(), getFunctionDeclaration(D)));
}
@@ -3453,7 +3584,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
// Update our current location
setLocation(Loc);
- if (CurLoc.isInvalid() || CurLoc.isMacroID())
+ if (CurLoc.isInvalid() || CurLoc.isMacroID() || LexicalBlockStack.empty())
return;
llvm::MDNode *Scope = LexicalBlockStack.back();
@@ -3530,9 +3661,9 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder, llvm::Function *Fn) {
DBuilder.finalizeSubprogram(Fn->getSubprogram());
}
-llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
- uint64_t *XOffset) {
-
+CGDebugInfo::BlockByRefType
+CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
+ uint64_t *XOffset) {
SmallVector<llvm::Metadata *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
@@ -3584,23 +3715,21 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
}
FType = Type;
- llvm::DIType *FieldTy = getOrCreateType(FType, Unit);
+ llvm::DIType *WrappedTy = getOrCreateType(FType, Unit);
FieldSize = CGM.getContext().getTypeSize(FType);
FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
- FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
- FieldAlign, FieldOffset,
- llvm::DINode::FlagZero, FieldTy);
+ llvm::DIType *FieldTy = DBuilder.createMemberType(
+ Unit, VD->getName(), Unit, 0, FieldSize, FieldAlign, FieldOffset,
+ llvm::DINode::FlagZero, WrappedTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
-
- llvm::DINode::DIFlags Flags = llvm::DINode::FlagBlockByrefStruct;
-
- return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
- nullptr, Elements);
+ return {DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0,
+ llvm::DINode::FlagZero, nullptr, Elements),
+ WrappedTy};
}
llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
@@ -3621,7 +3750,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
llvm::DIType *Ty;
uint64_t XOffset = 0;
if (VD->hasAttr<BlocksAttr>())
- Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType;
else
Ty = getOrCreateType(VD->getType(), Unit);
@@ -3759,7 +3888,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
llvm::DIType *Ty;
if (isByRef)
- Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType;
else
Ty = getOrCreateType(VD->getType(), Unit);
@@ -3830,6 +3959,44 @@ bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
}
} // namespace
+void CGDebugInfo::collectDefaultFieldsForBlockLiteralDeclare(
+ const CGBlockInfo &Block, const ASTContext &Context, SourceLocation Loc,
+ const llvm::StructLayout &BlockLayout, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &Fields) {
+ // Blocks in OpenCL have unique constraints which make the standard fields
+ // redundant while requiring size and align fields for enqueue_kernel. See
+ // initializeForBlockHeader in CGBlocks.cpp
+ if (CGM.getLangOpts().OpenCL) {
+ Fields.push_back(createFieldType("__size", Context.IntTy, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(0),
+ Unit, Unit));
+ Fields.push_back(createFieldType("__align", Context.IntTy, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(1),
+ Unit, Unit));
+ } else {
+ Fields.push_back(createFieldType("__isa", Context.VoidPtrTy, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(0),
+ Unit, Unit));
+ Fields.push_back(createFieldType("__flags", Context.IntTy, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(1),
+ Unit, Unit));
+ Fields.push_back(
+ createFieldType("__reserved", Context.IntTy, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(2), Unit, Unit));
+ auto *FnTy = Block.getBlockExpr()->getFunctionType();
+ auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
+ Fields.push_back(createFieldType("__FuncPtr", FnPtrType, Loc, AS_public,
+ BlockLayout.getElementOffsetInBits(3),
+ Unit, Unit));
+ Fields.push_back(createFieldType(
+ "__descriptor",
+ Context.getPointerType(Block.NeedsCopyDispose
+ ? Context.getBlockDescriptorExtendedType()
+ : Context.getBlockDescriptorType()),
+ Loc, AS_public, BlockLayout.getElementOffsetInBits(4), Unit, Unit));
+ }
+}
+
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
StringRef Name,
unsigned ArgNo,
@@ -3852,35 +4019,8 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
CGM.getDataLayout().getStructLayout(block.StructureType);
SmallVector<llvm::Metadata *, 16> fields;
- if (CGM.getLangOpts().OpenCL) {
- fields.push_back(createFieldType("__size", C.IntTy, loc, AS_public,
- blockLayout->getElementOffsetInBits(0),
- tunit, tunit));
- fields.push_back(createFieldType("__align", C.IntTy, loc, AS_public,
- blockLayout->getElementOffsetInBits(1),
- tunit, tunit));
- } else {
- fields.push_back(createFieldType("__isa", C.VoidPtrTy, loc, AS_public,
- blockLayout->getElementOffsetInBits(0),
- tunit, tunit));
- fields.push_back(createFieldType("__flags", C.IntTy, loc, AS_public,
- blockLayout->getElementOffsetInBits(1),
- tunit, tunit));
- fields.push_back(createFieldType("__reserved", C.IntTy, loc, AS_public,
- blockLayout->getElementOffsetInBits(2),
- tunit, tunit));
- auto *FnTy = block.getBlockExpr()->getFunctionType();
- auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
- fields.push_back(createFieldType("__FuncPtr", FnPtrType, loc, AS_public,
- blockLayout->getElementOffsetInBits(3),
- tunit, tunit));
- fields.push_back(createFieldType(
- "__descriptor",
- C.getPointerType(block.NeedsCopyDispose
- ? C.getBlockDescriptorExtendedType()
- : C.getBlockDescriptorType()),
- loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit, tunit));
- }
+ collectDefaultFieldsForBlockLiteralDeclare(block, C, loc, *blockLayout, tunit,
+ fields);
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
@@ -3923,7 +4063,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
QualType type;
if (auto *Method =
cast_or_null<CXXMethodDecl>(blockDecl->getNonClosureContext()))
- type = Method->getThisType(C);
+ type = Method->getThisType();
else if (auto *RDecl = dyn_cast<CXXRecordDecl>(blockDecl->getParent()))
type = QualType(RDecl->getTypeForDecl(), 0);
else
@@ -3941,10 +4081,10 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
if (capture->isByRef()) {
TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0;
-
- // FIXME: this creates a second copy of this type!
+ // FIXME: This recomputes the layout of the BlockByRefWrapper.
uint64_t xoffset;
- fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
+ fieldType =
+ EmitTypeForVarWithBlocksAttr(variable, &xoffset).BlockByRefWrapper;
fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
PtrInfo.Width, Align, offsetInBits,
@@ -4045,7 +4185,9 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
unsigned LineNo;
StringRef DeclName, LinkageName;
QualType T;
- collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);
+ llvm::MDTuple *TemplateParameters = nullptr;
+ collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName,
+ TemplateParameters, DContext);
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
@@ -4071,7 +4213,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
Var->hasLocalLinkage(),
Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
- getOrCreateStaticDataMemberDeclarationOrNull(D), Align);
+ getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters,
+ Align);
Var->addDebugInfo(GVE);
}
DeclCache[D->getCanonicalDecl()].reset(GVE);
@@ -4128,10 +4271,19 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
InitExpr = DBuilder.createConstantValueExpression(
Init.getFloat().bitcastToAPInt().getZExtValue());
}
+
+ llvm::MDTuple *TemplateParameters = nullptr;
+
+ if (isa<VarTemplateSpecializationDecl>(VD))
+ if (VarD) {
+ llvm::DINodeArray parameterNodes = CollectVarTemplateParams(VarD, &*Unit);
+ TemplateParameters = parameterNodes.get();
+ }
+
GV.reset(DBuilder.createGlobalVariableExpression(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD),
- Align));
+ TemplateParameters, Align));
}
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
@@ -4320,7 +4472,7 @@ void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
if (CGM.getCodeGenOpts().getDebugInfo() < codegenoptions::LimitedDebugInfo)
return;
- if (auto *DieTy = getOrCreateType(Ty, getOrCreateMainFile()))
+ if (auto *DieTy = getOrCreateType(Ty, TheCU->getFile()))
// Don't ignore in case of explicit cast where it is referenced indirectly.
DBuilder.retainType(DieTy);
}
@@ -4332,3 +4484,22 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
llvm::MDNode *Scope = LexicalBlockStack.back();
return llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc), Scope);
}
+
+llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
+ // Call site-related attributes are only useful in optimized programs, and
+ // when there's a possibility of debugging backtraces.
+ if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo ||
+ DebugKind == codegenoptions::LocTrackingOnly)
+ return llvm::DINode::FlagZero;
+
+ // Call site-related attributes are available in DWARF v5. Some debuggers,
+ // while not fully DWARF v5-compliant, may accept these attributes as if they
+ // were part of DWARF v4.
+ bool SupportsDWARFv4Ext =
+ CGM.getCodeGenOpts().DwarfVersion == 4 &&
+ CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB;
+ if (!SupportsDWARFv4Ext && CGM.getCodeGenOpts().DwarfVersion < 5)
+ return llvm::DINode::FlagZero;
+
+ return llvm::DINode::FlagAllCallsDescribed;
+}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index e632806138f0..031e40b9dde9 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -20,8 +20,8 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
@@ -76,6 +76,9 @@ class CGDebugInfo {
llvm::DIType *OCLQueueDITy = nullptr;
llvm::DIType *OCLNDRangeDITy = nullptr;
llvm::DIType *OCLReserveIDDITy = nullptr;
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ llvm::DIType *Id##Ty = nullptr;
+#include "clang/Basic/OpenCLExtensionTypes.def"
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
@@ -248,6 +251,11 @@ class CGDebugInfo {
llvm::DINodeArray CollectFunctionTemplateParams(const FunctionDecl *FD,
llvm::DIFile *Unit);
+ /// A helper function to collect debug info for variable template
+ /// parameters.
+ llvm::DINodeArray CollectVarTemplateParams(const VarDecl *VD,
+ llvm::DIFile *Unit);
+
/// A helper function to collect debug info for template
/// parameters.
llvm::DINodeArray
@@ -311,12 +319,31 @@ class CGDebugInfo {
void AppendAddressSpaceXDeref(unsigned AddressSpace,
SmallVectorImpl<int64_t> &Expr) const;
+ /// A helper function to collect debug info for the default elements of a
+ /// block.
+ ///
+ /// \returns The next available field offset after the default elements.
+ uint64_t collectDefaultElementTypesForBlockPointer(
+ const BlockPointerType *Ty, llvm::DIFile *Unit,
+ llvm::DIDerivedType *DescTy, unsigned LineNo,
+ SmallVectorImpl<llvm::Metadata *> &EltTys);
+
+ /// A helper function to collect debug info for the default fields of a
+ /// block.
+ void collectDefaultFieldsForBlockLiteralDeclare(
+ const CGBlockInfo &Block, const ASTContext &Context, SourceLocation Loc,
+ const llvm::StructLayout &BlockLayout, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &Fields);
+
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
void finalize();
+ /// Remap a given path with the current debug prefix map
+ std::string remapDIPath(StringRef) const;
+
/// Register VLA size expression debug node with the qualified type.
void registerVLASizeExpression(QualType Ty, llvm::Metadata *SizeExpr) {
SizeExprCache[Ty] = SizeExpr;
@@ -475,9 +502,16 @@ private:
llvm::Optional<unsigned> ArgNo,
CGBuilderTy &Builder);
+ struct BlockByRefType {
+ /// The wrapper struct used inside the __block_literal struct.
+ llvm::DIType *BlockByRefWrapper;
+ /// The type as it appears in the source code.
+ llvm::DIType *WrappedType;
+ };
+
/// Build up structure info for the byref. See \a BuildByRefType.
- llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
- uint64_t *OffSet);
+ BlockByRefType EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
+ uint64_t *OffSet);
/// Get context info for the DeclContext of \p Decl.
llvm::DIScope *getDeclContextDescriptor(const Decl *D);
@@ -497,9 +531,6 @@ private:
/// Create new compile unit.
void CreateCompileUnit();
- /// Remap a given path with the current debug prefix map
- std::string remapDIPath(StringRef) const;
-
/// Compute the file checksum debug info for input file ID.
Optional<llvm::DIFile::ChecksumKind>
computeChecksum(FileID FID, SmallString<32> &Checksum) const;
@@ -507,11 +538,15 @@ private:
/// Get the source of the given file ID.
Optional<StringRef> getSource(const SourceManager &SM, FileID FID);
- /// Get the file debug info descriptor for the input location.
+ /// Convenience function to get the file debug info descriptor for the input
+ /// location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
- /// Get the file info for main compile unit.
- llvm::DIFile *getOrCreateMainFile();
+ /// Create a file debug info descriptor for a source file.
+ llvm::DIFile *
+ createFile(StringRef FileName,
+ Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ Optional<StringRef> Source);
/// Get the type from the cache or create a new type if necessary.
llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
@@ -580,6 +615,11 @@ private:
unsigned LineNo, StringRef LinkageName,
llvm::GlobalVariable *Var, llvm::DIScope *DContext);
+
+ /// Return flags which enable debug info emission for call sites, provided
+ /// that it is supported and enabled.
+ llvm::DINode::DIFlags getCallSiteRelatedAttrs() const;
+
/// Get the printing policy for producing names for debug info.
PrintingPolicy getPrintingPolicy() const;
@@ -622,7 +662,9 @@ private:
/// Collect various properties of a VarDecl.
void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
unsigned &LineNo, QualType &T, StringRef &Name,
- StringRef &LinkageName, llvm::DIScope *&VDContext);
+ StringRef &LinkageName,
+ llvm::MDTuple *&TemplateParameters,
+ llvm::DIScope *&VDContext);
/// Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
@@ -702,7 +744,7 @@ public:
/// function \p InlinedFn. The current debug location becomes the inlined call
/// site of the inlined function.
ApplyInlineDebugLocation(CodeGenFunction &CGF, GlobalDecl InlinedFn);
- /// Restore everything back to the orginial state.
+ /// Restore everything back to the original state.
~ApplyInlineDebugLocation();
};
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 57b2fbadbeec..5959d889b455 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -26,10 +26,11 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
@@ -104,6 +105,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Import:
case Decl::OMPThreadPrivate:
case Decl::OMPCapturedExpr:
+ case Decl::OMPRequires:
case Decl::Empty:
// None of these decls require codegen support.
return;
@@ -545,7 +547,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Compute the address of the local variable, in case it's a
// byref or something.
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
Var.getType(), VK_LValue, SourceLocation());
llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
SourceLocation());
@@ -563,7 +565,7 @@ namespace {
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
@@ -752,9 +754,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
- enterFullExpression(ewc);
- init = ewc->getSubExpr();
+ if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
+ enterFullExpression(fe);
+ init = fe->getSubExpr();
}
CodeGenFunction::RunCleanupsScope Scope(*this);
@@ -795,15 +797,21 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
case Qualifiers::OCL_None:
llvm_unreachable("present but none");
+ case Qualifiers::OCL_Strong: {
+ if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+ // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
+ // that we omit the retain, which causes non-autoreleased return values to be
+ // released immediately.
+ LLVM_FALLTHROUGH;
+ }
+
case Qualifiers::OCL_ExplicitNone:
value = EmitARCUnsafeUnretainedScalarExpr(init);
break;
- case Qualifiers::OCL_Strong: {
- value = EmitARCRetainScalarExpr(init);
- break;
- }
-
case Qualifiers::OCL_Weak: {
// If it's not accessed by the initializer, try to emit the
// initialization with a copy or move.
@@ -948,111 +956,242 @@ static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}
-/// A byte pattern.
-///
-/// Can be "any" pattern if the value was padding or known to be undef.
-/// Can be "none" pattern if a sequence doesn't exist.
-class BytePattern {
- uint8_t Val;
- enum class ValueType : uint8_t { Specific, Any, None } Type;
- BytePattern(ValueType Type) : Type(Type) {}
-
-public:
- BytePattern(uint8_t Value) : Val(Value), Type(ValueType::Specific) {}
- static BytePattern Any() { return BytePattern(ValueType::Any); }
- static BytePattern None() { return BytePattern(ValueType::None); }
- bool isAny() const { return Type == ValueType::Any; }
- bool isNone() const { return Type == ValueType::None; }
- bool isValued() const { return Type == ValueType::Specific; }
- uint8_t getValue() const {
- assert(isValued());
- return Val;
- }
- BytePattern merge(const BytePattern Other) const {
- if (isNone() || Other.isNone())
- return None();
- if (isAny())
- return Other;
- if (Other.isAny())
- return *this;
- if (getValue() == Other.getValue())
- return *this;
- return None();
- }
-};
-
-/// Figures out whether the constant can be initialized with memset.
-static BytePattern constantIsRepeatedBytePattern(llvm::Constant *C) {
- if (isa<llvm::ConstantAggregateZero>(C) || isa<llvm::ConstantPointerNull>(C))
- return BytePattern(0x00);
- if (isa<llvm::UndefValue>(C))
- return BytePattern::Any();
-
- if (isa<llvm::ConstantInt>(C)) {
- auto *Int = cast<llvm::ConstantInt>(C);
- if (Int->getBitWidth() % 8 != 0)
- return BytePattern::None();
- const llvm::APInt &Value = Int->getValue();
- if (Value.isSplat(8))
- return BytePattern(Value.getLoBits(8).getLimitedValue());
- return BytePattern::None();
- }
-
- if (isa<llvm::ConstantFP>(C)) {
- auto *FP = cast<llvm::ConstantFP>(C);
- llvm::APInt Bits = FP->getValueAPF().bitcastToAPInt();
- if (Bits.getBitWidth() % 8 != 0)
- return BytePattern::None();
- if (!Bits.isSplat(8))
- return BytePattern::None();
- return BytePattern(Bits.getLimitedValue() & 0xFF);
- }
-
- if (isa<llvm::ConstantVector>(C)) {
- llvm::Constant *Splat = cast<llvm::ConstantVector>(C)->getSplatValue();
- if (Splat)
- return constantIsRepeatedBytePattern(Splat);
- return BytePattern::None();
- }
-
- if (isa<llvm::ConstantArray>(C) || isa<llvm::ConstantStruct>(C)) {
- BytePattern Pattern(BytePattern::Any());
- for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) {
- llvm::Constant *Elt = cast<llvm::Constant>(C->getOperand(I));
- Pattern = Pattern.merge(constantIsRepeatedBytePattern(Elt));
- if (Pattern.isNone())
- return Pattern;
+/// Decide whether we should use memset to initialize a local variable instead
+/// of using a memcpy from a constant global. Assumes we've already decided to
+/// not use bzero.
+/// FIXME: We could be more clever, as we are for bzero above, and generate
+/// memset followed by stores. It's unclear whether that's worth the effort.
+static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ uint64_t SizeLimit = 32;
+ if (GlobalSize <= SizeLimit)
+ return nullptr;
+ return llvm::isBytewiseValue(Init);
+}
+
+static llvm::Constant *patternFor(CodeGenModule &CGM, llvm::Type *Ty) {
+ // The following value is a guaranteed unmappable pointer value and has a
+ // repeated byte-pattern which makes it easier to synthesize. We use it for
+ // pointers as well as integers so that aggregates are likely to be
+ // initialized with this repeated value.
+ constexpr uint64_t LargeValue = 0xAAAAAAAAAAAAAAAAull;
+ // For 32-bit platforms it's a bit trickier because, across systems, only the
+ // zero page can reasonably be expected to be unmapped, and even then we need
+ // a very low address. We use a smaller value, and that value sadly doesn't
+ // have a repeated byte-pattern. We don't use it for integers.
+ constexpr uint32_t SmallValue = 0x000000AA;
+ // Floating-point values are initialized as NaNs because they propagate. Using
+ // a repeated byte pattern means that it will be easier to initialize
+ // all-floating-point aggregates and arrays with memset. Further, aggregates
+ // which mix integral and a few floats might also initialize with memset
+ // followed by a handful of stores for the floats. Using fairly unique NaNs
+ // also means they'll be easier to distinguish in a crash.
+ constexpr bool NegativeNaN = true;
+ constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
+ if (Ty->isIntOrIntVectorTy()) {
+ unsigned BitWidth = cast<llvm::IntegerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getBitWidth();
+ if (BitWidth <= 64)
+ return llvm::ConstantInt::get(Ty, LargeValue);
+ return llvm::ConstantInt::get(
+ Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, LargeValue)));
+ }
+ if (Ty->isPtrOrPtrVectorTy()) {
+ auto *PtrTy = cast<llvm::PointerType>(
+ Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+ unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
+ PtrTy->getAddressSpace());
+ llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
+ uint64_t IntValue;
+ switch (PtrWidth) {
+ default:
+ llvm_unreachable("pattern initialization of unsupported pointer width");
+ case 64:
+ IntValue = LargeValue;
+ break;
+ case 32:
+ IntValue = SmallValue;
+ break;
}
- return Pattern;
+ auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
+ return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
+ }
+ if (Ty->isFPOrFPVectorTy()) {
+ unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
+ (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+ ->getFltSemantics());
+ llvm::APInt Payload(64, NaNPayload);
+ if (BitWidth >= 64)
+ Payload = llvm::APInt::getSplat(BitWidth, Payload);
+ return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
+ }
+ if (Ty->isArrayTy()) {
+ // Note: this doesn't touch tail padding (at the end of an object, before
+ // the next array object). It is instead handled by replaceUndef.
+ auto *ArrTy = cast<llvm::ArrayType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Element(
+ ArrTy->getNumElements(), patternFor(CGM, ArrTy->getElementType()));
+ return llvm::ConstantArray::get(ArrTy, Element);
+ }
+
+ // Note: this doesn't touch struct padding. It will initialize as much union
+ // padding as is required for the largest type in the union. Padding is
+ // instead handled by replaceUndef. Stores to structs with volatile members
+ // don't have a volatile qualifier when initialized according to C++. This is
+ // fine because stack-based volatiles don't really have volatile semantics
+ // anyway, and the initialization shouldn't be observable.
+ auto *StructTy = cast<llvm::StructType>(Ty);
+ llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
+ for (unsigned El = 0; El != Struct.size(); ++El)
+ Struct[El] = patternFor(CGM, StructTy->getElementType(El));
+ return llvm::ConstantStruct::get(StructTy, Struct);
+}
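
A minimal sketch of the resulting bit patterns outside of LLVM's constant machinery (plain host C++; the exact NaN bit pattern is an assumption inferred from the getQNaN call above, not verified output of the patch):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      // Integers and 64-bit pointers: a repeated, unmappable 0xAA byte pattern.
      uint64_t IntOrPtr64 = 0xAAAAAAAAAAAAAAAAull;
      // 32-bit pointers: a low, likely-unmapped address; not byte-repeated.
      uint32_t Ptr32 = 0x000000AAu;
      // Floating point: a negative quiet NaN with an all-ones payload, so the
      // whole double is also a repeated 0xFF byte pattern.
      uint64_t NaNBits = 0xFFFFFFFFFFFFFFFFull;
      double D;
      std::memcpy(&D, &NaNBits, sizeof D);
      std::printf("%016llx %08x isnan=%d\n",
                  (unsigned long long)IntOrPtr64, Ptr32, D != D);
    }
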
+
+static Address createUnnamedGlobalFrom(CodeGenModule &CGM, const VarDecl &D,
+ CGBuilderTy &Builder,
+ llvm::Constant *Constant,
+ CharUnits Align) {
+ auto FunctionName = [&](const DeclContext *DC) -> std::string {
+ if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
+ if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
+ return CC->getNameAsString();
+ if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
+ return CD->getNameAsString();
+ return CGM.getMangledName(FD);
+ } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
+ return OM->getNameAsString();
+ } else if (isa<BlockDecl>(DC)) {
+ return "<block>";
+ } else if (isa<CapturedDecl>(DC)) {
+ return "<captured>";
+ } else {
+ llvm::llvm_unreachable_internal("expected a function or method");
+ }
+ };
+
+ auto *Ty = Constant->getType();
+ bool isConstant = true;
+ llvm::GlobalVariable *InsertBefore = nullptr;
+ unsigned AS = CGM.getContext().getTargetAddressSpace(
+ CGM.getStringLiteralAddressSpace());
+ llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
+ Constant,
+ "__const." + FunctionName(D.getParentFunctionOrMethod()) + "." +
+ D.getName(),
+ InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
+ Address SrcPtr = Address(GV, Align);
+ llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(), AS);
+ if (SrcPtr.getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+ return SrcPtr;
+}
+
+static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
+ Address Loc, bool isVolatile,
+ CGBuilderTy &Builder,
+ llvm::Constant *constant) {
+ auto *Ty = constant->getType();
+ bool isScalar = Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy() ||
+ Ty->isFPOrFPVectorTy();
+ if (isScalar) {
+ Builder.CreateStore(constant, Loc, isVolatile);
+ return;
}
- if (llvm::ConstantDataSequential *CDS =
- dyn_cast<llvm::ConstantDataSequential>(C)) {
- BytePattern Pattern(BytePattern::Any());
- for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
- llvm::Constant *Elt = CDS->getElementAsConstant(I);
- Pattern = Pattern.merge(constantIsRepeatedBytePattern(Elt));
- if (Pattern.isNone())
- return Pattern;
+ auto *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
+ auto *IntPtrTy = CGM.getDataLayout().getIntPtrType(CGM.getLLVMContext());
+
+ // If the initializer is all or mostly the same, codegen with bzero / memset
+ // then do a few stores afterward.
+ uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
+ auto *SizeVal = llvm::ConstantInt::get(IntPtrTy, ConstantSize);
+ if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ isVolatile);
+
+ bool valueAlreadyCorrect =
+ constant->isNullValue() || isa<llvm::UndefValue>(constant);
+ if (!valueAlreadyCorrect) {
+ Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
+ emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
+ }
+ return;
+ }
+
+ llvm::Value *Pattern = shouldUseMemSetToInitialize(constant, ConstantSize);
+ if (Pattern) {
+ uint64_t Value = 0x00;
+ if (!isa<llvm::UndefValue>(Pattern)) {
+ const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
+ assert(AP.getBitWidth() <= 8);
+ Value = AP.getLimitedValue();
}
- return Pattern;
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
+ isVolatile);
+ return;
}
- // BlockAddress, ConstantExpr, and everything else is scary.
- return BytePattern::None();
+ Builder.CreateMemCpy(
+ Loc,
+ createUnnamedGlobalFrom(CGM, D, Builder, constant, Loc.getAlignment()),
+ SizeVal, isVolatile);
}
-/// Decide whether we should use memset to initialize a local variable instead
-/// of using a memcpy from a constant global. Assumes we've already decided to
-/// not user bzero.
-/// FIXME We could be more clever, as we are for bzero above, and generate
-/// memset followed by stores. It's unclear that's worth the effort.
-static BytePattern shouldUseMemSetToInitialize(llvm::Constant *Init,
- uint64_t GlobalSize) {
- uint64_t SizeLimit = 32;
- if (GlobalSize <= SizeLimit)
- return BytePattern::None();
- return constantIsRepeatedBytePattern(Init);
+static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
+ Address Loc, bool isVolatile,
+ CGBuilderTy &Builder) {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *constant = llvm::Constant::getNullValue(ElTy);
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+}
+
+static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
+ Address Loc, bool isVolatile,
+ CGBuilderTy &Builder) {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *constant = patternFor(CGM, ElTy);
+ assert(!isa<llvm::UndefValue>(constant));
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
+}
+
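+// Recursively check whether an aggregate constant contains any undef values.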
+static bool containsUndef(llvm::Constant *constant) {
+ auto *Ty = constant->getType();
+ if (isa<llvm::UndefValue>(constant))
+ return true;
+ if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
+ for (llvm::Use &Op : constant->operands())
+ if (containsUndef(cast<llvm::Constant>(Op)))
+ return true;
+ return false;
+}
+
+static llvm::Constant *replaceUndef(llvm::Constant *constant) {
+ // FIXME: when doing pattern initialization, replace undef with 0xAA instead.
+ // FIXME: also replace padding between values by creating a new struct type
+ // which has no padding.
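+ // For now, undef leaves simply become zero; aggregates are rebuilt element
+ // by element only when they actually contain an undef somewhere inside.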
+ auto *Ty = constant->getType();
+ if (isa<llvm::UndefValue>(constant))
+ return llvm::Constant::getNullValue(Ty);
+ if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
+ return constant;
+ if (!containsUndef(constant))
+ return constant;
+ llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
+ for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
+ auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
+ Values[Op] = replaceUndef(OpValue);
+ }
+ if (Ty->isStructTy())
+ return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
+ if (Ty->isArrayTy())
+ return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
+ assert(Ty->isVectorTy());
+ return llvm::ConstantVector::get(Values);
}
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
@@ -1098,6 +1237,7 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
// For each dimension stores its QualType and corresponding
// size-expression Value.
SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
+ SmallVector<IdentifierInfo *, 4> VLAExprNames;
// Break down the array into individual dimensions.
QualType Type1D = D.getType();
@@ -1106,8 +1246,14 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
else {
- auto SizeExprAddr = CreateDefaultAlignTempAlloca(
- VlaSize.NumElts->getType(), "__vla_expr");
+ // Generate a locally unique name for the size expression.
+ Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
+ SmallString<12> Buffer;
+ StringRef NameRef = Name.toStringRef(Buffer);
+ auto &Ident = getContext().Idents.getOwn(NameRef);
+ VLAExprNames.push_back(&Ident);
+ auto SizeExprAddr =
+ CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
Dimensions.emplace_back(SizeExprAddr.getPointer(),
Type1D.getUnqualifiedType());
@@ -1121,20 +1267,20 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
// Register each dimension's size-expression with a DILocalVariable,
// so that it can be used by CGDebugInfo when instantiating a DISubrange
// to describe this array.
+ unsigned NameIdx = 0;
for (auto &VlaSize : Dimensions) {
llvm::Metadata *MD;
if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
MD = llvm::ConstantAsMetadata::get(C);
else {
// Create an artificial VarDecl to generate debug info for.
- IdentifierInfo &NameIdent = getContext().Idents.getOwn(
- cast<llvm::AllocaInst>(VlaSize.NumElts)->getName());
+ IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
auto QT = getContext().getIntTypeForBitwidth(
VlaExprTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
- D.getLocation(), D.getLocation(), &NameIdent, QT,
+ D.getLocation(), D.getLocation(), NameIdent, QT,
getContext().CreateTypeSourceInfo(QT), SC_Auto);
ArtificialDecl->setImplicit();
@@ -1157,8 +1303,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
AutoVarEmission emission(D);
- bool isByRef = D.hasAttr<BlocksAttr>();
- emission.IsByRef = isByRef;
+ bool isEscapingByRef = D.isEscapingByref();
+ emission.IsEscapingByRef = isEscapingByRef;
CharUnits alignment = getContext().getDeclAlign(&D);
@@ -1197,8 +1343,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// in OpenCL.
if ((!getLangOpts().OpenCL ||
Ty.getAddressSpace() == LangAS::opencl_constant) &&
- (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
- CGM.isTypeConstant(Ty, true))) {
+ (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
+ !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1250,7 +1396,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
} else {
CharUnits allocaAlignment;
llvm::Type *allocaTy;
- if (isByRef) {
+ if (isEscapingByRef) {
auto &byrefInfo = getBlockByrefInfo(&D);
allocaTy = byrefInfo.Type;
allocaAlignment = byrefInfo.ByrefAlignment;
@@ -1439,6 +1585,8 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
QualType type = D.getType();
+ bool isVolatile = type.isVolatileQualified();
+
// If this local has an initializer, emit it now.
const Expr *Init = D.getInit();
@@ -1450,7 +1598,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
}
// Initialize the structure of a __block variable.
- if (emission.IsByRef)
+ if (emission.IsEscapingByRef)
emitByrefStructureInit(emission);
// Initialize the variable here if it doesn't have a initializer and it is a
@@ -1460,30 +1608,126 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
type.isNonTrivialToPrimitiveDefaultInitialize() ==
QualType::PDIK_Struct) {
LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
- if (emission.IsByRef)
+ if (emission.IsEscapingByRef)
drillIntoBlockVariable(*this, Dst, &D);
defaultInitNonTrivialCStructVar(Dst);
return;
}
- if (isTrivialInitializer(Init))
- return;
-
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
- bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
+ bool capturedByInit =
+ Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
Address Loc =
- capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+ capturedByInit ? emission.Addr : emission.getObjectAddress(*this);
+
+ // Note: constexpr already initializes everything correctly.
+ LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
+ (D.isConstexpr()
+ ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+ : (D.getAttr<UninitializedAttr>()
+ ? LangOptions::TrivialAutoVarInitKind::Uninitialized
+ : getContext().getLangOpts().getTrivialAutoVarInit()));
+
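+ // Helper that zero- or pattern-fills the variable when trivial auto-var
+ // initialization asks for it; it does nothing when the chosen kind is
+ // Uninitialized. VLAs are handled with a run-time loop further down.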
+ auto initializeWhatIsTechnicallyUninitialized = [&]() {
+ if (trivialAutoVarInit ==
+ LangOptions::TrivialAutoVarInitKind::Uninitialized)
+ return;
+
+ CharUnits Size = getContext().getTypeSizeInChars(type);
+ if (!Size.isZero()) {
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled above");
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Pattern:
+ emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
+ break;
+ }
+ return;
+ }
+
+ // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
+ // them, so emit a memcpy with the VLA size to initialize each element.
+ // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
+ // will catch that code, but there exists code which generates zero-sized
+ // VLAs. Be nice and initialize whatever they requested.
+ const VariableArrayType *VlaType =
+ dyn_cast_or_null<VariableArrayType>(getContext().getAsArrayType(type));
+ if (!VlaType)
+ return;
+ auto VlaSize = getVLASize(VlaType);
+ auto SizeVal = VlaSize.NumElts;
+ CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
+ switch (trivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ llvm_unreachable("Uninitialized handled above");
+
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ isVolatile);
+ break;
+
+ case LangOptions::TrivialAutoVarInitKind::Pattern: {
+ llvm::Type *ElTy = Loc.getElementType();
+ llvm::Constant *Constant = patternFor(CGM, ElTy);
+ CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
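+ // The VLA length is only known at run time, so emit an explicit loop:
+ // skip it entirely for zero-sized VLAs, otherwise memcpy the element-sized
+ // pattern into each element in turn.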
+ llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
+ llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
+ llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
+ SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
+ "vla.iszerosized");
+ Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
+ EmitBlock(SetupBB);
+ if (!EltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
+ llvm::Value *BaseSizeInChars =
+ llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
+ Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
+ llvm::Value *End =
+ Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
+ llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
+ EmitBlock(LoopBB);
+ llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
+ Cur->addIncoming(Begin.getPointer(), OriginBB);
+ CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
+ Builder.CreateMemCpy(
+ Address(Cur, CurAlign),
+ createUnnamedGlobalFrom(CGM, D, Builder, Constant, ConstantAlign),
+ BaseSizeInChars, isVolatile);
+ llvm::Value *Next =
+ Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
+ llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
+ Builder.CreateCondBr(Done, ContBB, LoopBB);
+ Cur->addIncoming(Next, LoopBB);
+ EmitBlock(ContBB);
+ } break;
+ }
+ };
+
+ if (isTrivialInitializer(Init)) {
+ initializeWhatIsTechnicallyUninitialized();
+ return;
+ }
llvm::Constant *constant = nullptr;
if (emission.IsConstantAggregate || D.isConstexpr()) {
assert(!capturedByInit && "constant init contains a capturing block?");
constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
+ if (constant && trivialAutoVarInit !=
+ LangOptions::TrivialAutoVarInitKind::Uninitialized)
+ constant = replaceUndef(constant);
}
if (!constant) {
+ initializeWhatIsTechnicallyUninitialized();
LValue lv = MakeAddrLValue(Loc, type);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
@@ -1496,61 +1740,11 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
- // If this is a simple aggregate initialization, we can optimize it
- // in various ways.
- bool isVolatile = type.isVolatileQualified();
-
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtrTy,
- getContext().getTypeSizeInChars(type).getQuantity());
-
llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
if (Loc.getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
- // If the initializer is all or mostly the same, codegen with bzero / memset
- // then do a few stores afterward.
- uint64_t ConstantSize =
- CGM.getDataLayout().getTypeAllocSize(constant->getType());
- if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
- isVolatile);
- // Zero and undef don't require a stores.
- if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
- Loc = Builder.CreateBitCast(Loc,
- constant->getType()->getPointerTo(Loc.getAddressSpace()));
- emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
- }
- return;
- }
-
- BytePattern Pattern = shouldUseMemSetToInitialize(constant, ConstantSize);
- if (!Pattern.isNone()) {
- uint8_t Value = Pattern.isAny() ? 0x00 : Pattern.getValue();
- Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, Value), SizeVal,
- isVolatile);
- return;
- }
-
- // Otherwise, create a temporary global with the initializer then
- // memcpy from the global to the alloca.
- std::string Name = getStaticDeclName(CGM, D);
- unsigned AS = CGM.getContext().getTargetAddressSpace(
- CGM.getStringLiteralAddressSpace());
- BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
-
- llvm::GlobalVariable *GV = new llvm::GlobalVariable(
- CGM.getModule(), constant->getType(), true,
- llvm::GlobalValue::PrivateLinkage, constant, Name, nullptr,
- llvm::GlobalValue::NotThreadLocal, AS);
- GV->setAlignment(Loc.getAlignment().getQuantity());
- GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- Address SrcPtr = Address(GV, Loc.getAlignment());
- if (SrcPtr.getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
-
- Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
+ emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
@@ -1712,12 +1906,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
// If this is a block variable, call _Block_object_destroy
// (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
// mode.
- if (emission.IsByRef && CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ if (emission.IsEscapingByRef &&
+ CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
if (emission.Variable->getType().isObjCGCWeak())
Flags |= BLOCK_FIELD_IS_WEAK;
enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
- /*LoadBlockVarAddr*/ false);
+ /*LoadBlockVarAddr*/ false,
+ cxxDestructorCanThrow(emission.Variable->getType()));
}
}
@@ -2134,15 +2330,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// cleanup to do the release at the end of the function.
bool isConsumed = D.hasAttr<NSConsumedAttr>();
- // 'self' is always formally __strong, but if this is not an
- // init method then we don't want to retain it.
+ // If a parameter is pseudo-strong then we can omit the implicit retain.
if (D.isARCPseudoStrong()) {
- const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
- assert(&D == method->getSelfDecl());
- assert(lt == Qualifiers::OCL_Strong);
- assert(qs.hasConst());
- assert(method->getMethodFamily() != OMF_init);
- (void) method;
+ assert(lt == Qualifiers::OCL_Strong &&
+ "pseudo-strong variable isn't strong?");
+ assert(qs.hasConst() && "pseudo-strong variable should be const!");
lt = Qualifiers::OCL_ExplicitNone;
}
@@ -2224,3 +2416,7 @@ void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
return;
getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}
+
+void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
+ getOpenMPRuntime().checkArchForUnifiedAddressing(*this, D);
+}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 510863f68eff..9aa31f181e99 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -15,7 +15,7 @@
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
@@ -26,7 +26,10 @@ using namespace CodeGen;
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
ConstantAddress DeclPtr) {
- assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(
+ (D.hasGlobalStorage() ||
+ (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
+ "VarDecl must have global or local (in the case of OpenCL) storage!");
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
@@ -63,15 +66,24 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- ConstantAddress addr) {
+ ConstantAddress Addr) {
+ // Honor __attribute__((no_destroy)) and bail instead of attempting
+ // to emit a reference to a possibly nonexistent destructor, which
+ // in turn can cause a crash. This will result in a global constructor
+ // that isn't balanced out by a destructor call as intended by the
+ // attribute. This also checks for -fno-c++-static-destructors and
+ // bails even if the attribute is not present.
+ if (D.isNoDestroy(CGF.getContext()))
+ return;
+
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
- QualType type = D.getType();
- QualType::DestructionKind dtorKind = type.isDestructedType();
+ QualType Type = D.getType();
+ QualType::DestructionKind DtorKind = Type.isDestructedType();
- switch (dtorKind) {
+ switch (DtorKind) {
case QualType::DK_none:
return;
@@ -86,13 +98,14 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
return;
}
- llvm::Constant *function;
- llvm::Constant *argument;
+ llvm::Constant *Func;
+ llvm::Constant *Argument;
// Special-case non-array C++ destructors, if they have the right signature.
// Under some ABIs, destructors return this instead of void, and cannot be
- // passed directly to __cxa_atexit if the target does not allow this mismatch.
- const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
+ // passed directly to __cxa_atexit if the target does not allow this
+ // mismatch.
+ const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
bool CanRegisterDestructor =
Record && (!CGM.getCXXABI().HasThisReturn(
GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
@@ -103,43 +116,47 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
assert(!Record->hasTrivialDestructor());
- CXXDestructorDecl *dtor = Record->getDestructor();
+ CXXDestructorDecl *Dtor = Record->getDestructor();
- function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
- argument = llvm::ConstantExpr::getBitCast(
- addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());
+ Func = CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete);
+ Argument = llvm::ConstantExpr::getBitCast(
+ Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
- function = CodeGenFunction(CGM)
- .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
- CGF.needsEHCleanup(dtorKind), &D);
- argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ Func = CodeGenFunction(CGM)
+ .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
+ CGF.needsEHCleanup(DtorKind), &D);
+ Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
- CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
+ CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Addr) {
+ return CGF.EmitInvariantStart(
+ Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
+}
+
+void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
// Do not emit the intrinsic if we're not optimizing.
- if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
return;
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
// Overloaded address space type.
- llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
- llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);
+ llvm::Type *ObjectPtr[1] = {Int8PtrTy};
+ llvm::Constant *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
- CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
- uint64_t Width = WidthChars.getQuantity();
- llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
- llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
- CGF.Builder.CreateCall(InvariantStart, Args);
+ uint64_t Width = Size.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
+ Builder.CreateCall(InvariantStart, Args);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
@@ -347,6 +364,10 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
+ !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+
if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
!isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SafeStack);
@@ -355,6 +376,22 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
!isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
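+ // Also apply the selected return-address signing and branch-target
+ // enforcement options to this synthesized helper function.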
+ auto RASignKind = getCodeGenOpts().getSignReturnAddress();
+ if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
+ Fn->addFnAttr("sign-return-address",
+ RASignKind == CodeGenOptions::SignReturnAddressScope::All
+ ? "all"
+ : "non-leaf");
+ auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
+ Fn->addFnAttr("sign-return-address-key",
+ RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ ? "a_key"
+ : "b_key");
+ }
+
+ if (getCodeGenOpts().BranchTargetEnforcement)
+ Fn->addFnAttr("branch-target-enforcement");
+
return Fn;
}
@@ -565,7 +602,7 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
if (D->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
- CurEHLocation = D->getLocStart();
+ CurEHLocation = D->getBeginLoc();
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
@@ -587,7 +624,7 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> Decls,
- Address Guard) {
+ ConstantAddress Guard) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
@@ -611,6 +648,12 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
// initializers use previously-initialized thread_local vars, that's
// probably supposed to be OK, but the standard doesn't say.
Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
+
+ // The guard variable can't ever change again.
+ EmitInvariantStart(
+ Guard.getPointer(),
+ CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
}
RunCleanupsScope Scope(*this);
@@ -679,7 +722,7 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
FTy, "__cxx_global_array_dtor", FI, VD->getLocation());
- CurEHLocation = VD->getLocStart();
+ CurEHLocation = VD->getBeginLoc();
StartFunction(VD, getContext().VoidTy, fn, FI, args);
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index a2ff102e1ab4..5756e13d2623 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -66,7 +66,7 @@ llvm::Constant *CodeGenModule::getTerminateFn() {
name = "__std_terminate";
else
name = "?terminate@@YAXXZ";
- } else if (getLangOpts().ObjC1 &&
+ } else if (getLangOpts().ObjC &&
getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
else
@@ -224,7 +224,7 @@ const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
if (FD && FD->usesSEHTry())
return getSEHPersonalityMSVC(T);
- if (L.ObjC1)
+ if (L.ObjC)
return L.CPlusPlus ? getObjCXXPersonality(Target, L)
: getObjCPersonality(Target, L);
return L.CPlusPlus ? getCXXPersonality(Target, L)
@@ -250,7 +250,11 @@ static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
- return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
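+ // Build the i8* in the program address space, since on some targets
+ // functions do not live in address space 0.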
+ llvm::PointerType* Int8PtrTy = llvm::PointerType::get(
+ llvm::Type::getInt8Ty(CGM.getLLVMContext()),
+ CGM.getDataLayout().getProgramAddressSpace());
+
+ return llvm::ConstantExpr::getBitCast(Fn, Int8PtrTy);
}
/// Check whether a landingpad instruction only uses C++ features.
@@ -315,7 +319,7 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
// If we're not in ObjC++ -fexceptions, there's nothing to do.
- if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC || !LangOpts.Exceptions)
return;
// Both the problem this endeavors to fix and the way the logic
@@ -1248,7 +1252,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// we follow the false destination for each of the cond branches to reach
// the rethrow block.
llvm::BasicBlock *RethrowBlock = WasmCatchStartBlock;
- while (llvm::TerminatorInst *TI = RethrowBlock->getTerminator()) {
+ while (llvm::Instruction *TI = RethrowBlock->getTerminator()) {
auto *BI = cast<llvm::BranchInst>(TI);
assert(BI->isConditional());
RethrowBlock = BI->getSuccessor(1);
@@ -1623,8 +1627,16 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
// Compute the two argument values.
QualType ArgTys[2] = {Context.UnsignedCharTy, Context.VoidPtrTy};
- llvm::Value *LocalAddrFn = CGM.getIntrinsic(llvm::Intrinsic::localaddress);
- llvm::Value *FP = CGF.Builder.CreateCall(LocalAddrFn);
+ llvm::Value *FP = nullptr;
+ // If CGF.IsOutlinedSEHHelper is true, then we are within a finally block.
+ if (CGF.IsOutlinedSEHHelper) {
+ FP = &CGF.CurFn->arg_begin()[1];
+ } else {
+ llvm::Value *LocalAddrFn =
+ CGM.getIntrinsic(llvm::Intrinsic::localaddress);
+ FP = CGF.Builder.CreateCall(LocalAddrFn);
+ }
+
llvm::Value *IsForEH =
llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
Args.add(RValue::get(IsForEH), ArgTys[0]);
@@ -1777,7 +1789,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
// frame pointer of the parent function. We only need to do this in filters,
// since finally funclets recover the parent FP for us.
llvm::Function *RecoverFPIntrin =
- CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
+ CGM.getIntrinsic(llvm::Intrinsic::eh_recoverfp);
llvm::Constant *ParentI8Fn =
llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
@@ -1823,13 +1835,13 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
bool IsFilter,
const Stmt *OutlinedStmt) {
- SourceLocation StartLoc = OutlinedStmt->getLocStart();
+ SourceLocation StartLoc = OutlinedStmt->getBeginLoc();
// Get the mangled function name.
SmallString<128> Name;
{
llvm::raw_svector_ostream OS(Name);
- const FunctionDecl *ParentSEHFn = ParentCGF.CurSEHParent;
+ const NamedDecl *ParentSEHFn = ParentCGF.CurSEHParent;
assert(ParentSEHFn && "No CurSEHParent!");
MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
if (IsFilter)
@@ -1871,10 +1883,10 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
IsOutlinedSEHHelper = true;
StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
- OutlinedStmt->getLocStart(), OutlinedStmt->getLocStart());
+ OutlinedStmt->getBeginLoc(), OutlinedStmt->getBeginLoc());
CurSEHParent = ParentCGF.CurSEHParent;
- CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FnInfo, CurFn);
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
}
@@ -1893,7 +1905,7 @@ CodeGenFunction::GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
FilterExpr->getType()->isSignedIntegerType());
Builder.CreateStore(R, ReturnValue);
- FinishFunction(FilterExpr->getLocEnd());
+ FinishFunction(FilterExpr->getEndLoc());
return CurFn;
}
@@ -1907,7 +1919,7 @@ CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
// Emit the original filter expression, convert to i32, and return.
EmitStmt(FinallyBlock);
- FinishFunction(FinallyBlock->getLocEnd());
+ FinishFunction(FinallyBlock->getEndLoc());
return CurFn;
}
@@ -1972,6 +1984,11 @@ llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
return Builder.CreateZExt(&*AI, Int32Ty);
}
+void CodeGenFunction::pushSEHCleanup(CleanupKind Kind,
+ llvm::Function *FinallyFunc) {
+ EHStack.pushCleanup<PerformSEHFinally>(Kind, FinallyFunc);
+}
+
void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index f168dd02ead1..34a921e2dc00 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -26,7 +26,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
@@ -419,8 +419,12 @@ LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->GetTemporaryExpr();
- // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
- // as that will cause the lifetime adjustment to be lost for ARC
+ assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
+ !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
+ "Reference should never be pseudo-strong!");
+
+ // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
+ // as that will cause the lifetime adjustment to be lost for ARC
auto ownership = M->getType().getObjCLifetime();
if (ownership != Qualifiers::OCL_None &&
ownership != Qualifiers::OCL_ExplicitNone) {
@@ -498,18 +502,51 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
} else {
switch (M->getStorageDuration()) {
case SD_Automatic:
- case SD_FullExpression:
if (auto *Size = EmitLifetimeStart(
CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
Alloca.getPointer())) {
- if (M->getStorageDuration() == SD_Automatic)
- pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
- Alloca, Size);
- else
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
- Size);
+ pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
+ Alloca, Size);
+ }
+ break;
+
+ case SD_FullExpression: {
+ if (!ShouldEmitLifetimeMarkers)
+ break;
+
+ // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
+ // marker. Instead, start the lifetime of a conditional temporary earlier
+ // so that it's unconditional. Don't do this in ASan's use-after-scope
+ // mode so that it gets the more precise lifetime marks. If the type has
+ // a non-trivial destructor, we'll have a cleanup block for it anyway,
+ // so this typically doesn't help; skip it in that case.
+ ConditionalEvaluation *OldConditional = nullptr;
+ CGBuilderTy::InsertPoint OldIP;
+ if (isInConditionalBranch() && !E->getType().isDestructedType() &&
+ !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
+ OldConditional = OutermostConditional;
+ OutermostConditional = nullptr;
+
+ OldIP = Builder.saveIP();
+ llvm::BasicBlock *Block = OldConditional->getStartingBlock();
+ Builder.restoreIP(CGBuilderTy::InsertPoint(
+ Block, llvm::BasicBlock::iterator(Block->back())));
+ }
+
+ if (auto *Size = EmitLifetimeStart(
+ CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
+ Alloca.getPointer())) {
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
+ Size);
+ }
+
+ if (OldConditional) {
+ OutermostConditional = OldConditional;
+ Builder.restoreIP(OldIP);
}
break;
+ }
+
default:
break;
}
@@ -1043,7 +1080,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getLocStart());
+ CE->getBeginLoc());
}
return CE->getCastKind() != CK_AddressSpaceConversion
? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
@@ -1227,6 +1264,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ConstantExprClass:
+ return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
case Expr::ParenExprClass:
return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
@@ -1458,6 +1497,16 @@ CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
return ConstantEmission();
}
+llvm::Value *CodeGenFunction::emitScalarConstant(
+ const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
+ assert(Constant && "not a constant");
+ if (Constant.isReference())
+ return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
+ E->getExprLoc())
+ .getScalarVal();
+ return Constant.getValue();
+}
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
@@ -2237,18 +2286,14 @@ static LValue EmitThreadPrivateVarDeclLValue(
static Address emitDeclTargetLinkVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD, QualType T) {
- for (const auto *D : VD->redecls()) {
- if (!VD->hasAttrs())
- continue;
- if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
- if (Attr->getMapType() == OMPDeclareTargetDeclAttr::MT_Link) {
- QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
- Address Addr =
- CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
- return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
- }
- }
- return Address::invalid();
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_To)
+ return Address::invalid();
+ assert(*Res == OMPDeclareTargetDeclAttr::MT_Link && "Expected link clause");
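+ // Variables mapped with the 'link' clause are reached through a separate
+ // link pointer, so load the real address through that pointer.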
+ QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
+ Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
Address
@@ -2408,6 +2453,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// A DeclRefExpr for a reference initialized by a constant expression can
// appear without being odr-used. Directly emit the constant initializer.
const Expr *Init = VD->getAnyInitializer(VD);
+ const auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl);
if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
VD->isUsableInConstantExpressions(getContext()) &&
VD->checkInitIsICE() &&
@@ -2417,7 +2463,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
(LocalDeclMap.count(VD->getCanonicalDecl()) ||
CapturedStmtInfo->lookup(VD->getCanonicalDecl()))) ||
LambdaCaptureFields.lookup(VD->getCanonicalDecl()) ||
- isa<BlockDecl>(CurCodeDecl)))) {
+ (BD && BD->capturesVariable(VD))))) {
llvm::Constant *Val =
ConstantEmitter(*this).emitAbstract(E->getLocation(),
*VD->evaluateValue(),
@@ -2456,7 +2502,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
assert(isa<BlockDecl>(CurCodeDecl));
- Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
+ Address addr = GetAddrOfBlockDecl(VD);
return MakeAddrLValue(addr, T, AlignmentSource::Decl);
}
}
@@ -2508,7 +2554,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
// Drill into block byref variables.
- bool isBlockByref = VD->hasAttr<BlocksAttr>();
+ bool isBlockByref = VD->isEscapingByref();
if (isBlockByref) {
addr = emitBlockByrefAddress(addr, VD);
}
@@ -2571,7 +2617,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
@@ -2632,7 +2678,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
if (FnName.startswith("\01"))
FnName = FnName.substr(1);
StringRef NameItems[] = {
- PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
+ PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
std::string Name = SL->getString();
@@ -2837,6 +2883,11 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
+ Optional<ApplyDebugLocation> DL;
+ if (!CGF.Builder.getCurrentDebugLocation()) {
+ // Ensure that the call has at least an artificial debug location.
+ DL.emplace(CGF, SourceLocation());
+ }
bool NeedsAbortSuffix =
IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
@@ -3448,7 +3499,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
@@ -3901,7 +3952,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo,
FieldTBAAInfo);
if (RecordCVR & Qualifiers::Volatile)
- RefLVal.getQuals().setVolatile(true);
+ RefLVal.getQuals().addVolatile();
addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
// Qualifiers on the struct don't apply to the referencee.
@@ -4121,8 +4172,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
- case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -4193,8 +4245,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
- /*MayBeNull=*/false,
- CFITCK_DerivedCast, E->getLocStart());
+ /*MayBeNull=*/false, CFITCK_DerivedCast,
+ E->getBeginLoc());
return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
@@ -4210,12 +4262,21 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
- /*MayBeNull=*/false,
- CFITCK_UnrelatedCast, E->getLocStart());
+ /*MayBeNull=*/false, CFITCK_UnrelatedCast,
+ E->getBeginLoc());
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
+ case CK_AddressSpaceConversion: {
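+ // For an lvalue, an address-space conversion only rewrites the pointer;
+ // alignment, base info and TBAA carry over from the source lvalue.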
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType DestTy = getContext().getPointerType(E->getType());
+ llvm::Value *V = getTargetHooks().performAddrSpaceCast(
+ *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(),
+ E->getType().getAddressSpace(), ConvertType(DestTy));
+ return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()),
+ E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
+ }
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
Address V = Builder.CreateElementBitCast(LV.getAddress(),
@@ -4223,10 +4284,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
- case CK_ZeroToOCLQueue:
- llvm_unreachable("NULL to OpenCL queue lvalue cast is not valid");
- case CK_ZeroToOCLEvent:
- llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
+ case CK_ZeroToOCLOpaqueType:
+ llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
}
llvm_unreachable("Unhandled lvalue cast kind?");
@@ -4333,7 +4392,7 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
}
llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
- return CGCallee::forDirect(calleePtr, FD);
+ return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
@@ -4377,8 +4436,13 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
calleePtr = EmitLValue(E).getPointer();
}
assert(functionType->isFunctionType());
- CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(),
- E->getReferencedDeclOfCallee());
+
+ GlobalDecl GD;
+ if (const auto *VD =
+ dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
+ GD = GlobalDecl(VD);
+
+ CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
CGCallee callee(calleeInfo, calleePtr);
return callee;
}
@@ -4563,7 +4627,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
- const Decl *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl();
+ const Decl *TargetDecl =
+ OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
// We can only guarantee that a function is called from the correct
@@ -4620,10 +4685,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
- llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(E->getLocStart()),
- EmitCheckTypeDescriptor(CalleeType)
- };
+ llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
+ EmitCheckTypeDescriptor(CalleeType)};
EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
SanitizerHandler::FunctionTypeMismatch, StaticData, CalleePtr);
@@ -4657,7 +4720,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
- EmitCheckSourceLocation(E->getLocStart()),
+ EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(QualType(FnType, 0)),
};
if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 62641102861c..db49b3f28a59 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -125,6 +125,10 @@ public:
return Visit(E->getReplacement());
}
+ void VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
// l-values.
void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
@@ -847,10 +851,11 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index f29ef754c03f..884ce96859c5 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -17,8 +17,8 @@
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "ConstantEmitter.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
@@ -177,7 +177,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
- CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
+ CGCallee callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
ReturnValue);
}
@@ -353,13 +354,13 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
else if (!DevirtualizedMethod)
Callee = CGCallee::forDirect(
CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
- Dtor);
+ GlobalDecl(Dtor, Dtor_Complete));
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
- DDtor);
+ CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
+ GlobalDecl(DDtor, Dtor_Complete));
}
EmitCXXMemberOrOperatorCall(
CalleeDecl, Callee, ReturnValue, This.getPointer(),
@@ -371,8 +372,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGCallee Callee;
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
- Ctor);
+ CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
+ GlobalDecl(Ctor, Ctor_Complete));
} else if (UseVirtualCall) {
Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
@@ -383,17 +384,18 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
std::tie(VTable, RD) =
CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
MD->getParent());
- EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getLocStart());
+ EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
}
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
+ Callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
else {
- Callee = CGCallee::forDirect(
- CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
- DevirtualizedMethod);
+ Callee =
+ CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
+ GlobalDecl(DevirtualizedMethod));
}
}
@@ -1293,7 +1295,7 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
@@ -1654,9 +1656,10 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// Emit a null check on the allocation result if the allocation
// function is allowed to return null (because it has a non-throwing
// exception spec or is the reserved placement new) and we have an
- // interesting initializer.
- bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
- (!allocType.isPODType(getContext()) || E->hasInitializer());
+ // interesting initializer or will be running sanitizers on the initialization.
+ bool nullCheck = E->shouldNullCheckAllocation() &&
+ (!allocType.isPODType(getContext()) || E->hasInitializer() ||
+ sanitizePerformTypeCheck());
llvm::BasicBlock *nullCheckBB = nullptr;
llvm::BasicBlock *contBB = nullptr;
@@ -2252,7 +2255,6 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
}
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
- RunCleanupsScope Scope(*this);
LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index fb176093a741..2db693b44c90 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -101,6 +101,9 @@ public:
llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
@@ -505,10 +508,11 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_AddressSpaceConversion:
case CK_IntToOCLSampler:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 68766479a539..c9475840aeeb 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -47,7 +47,7 @@ class ConstStructBuilder {
public:
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater,
QualType ValTy);
static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
@@ -76,7 +76,7 @@ private:
void ConvertStructToPacked();
bool Build(InitListExpr *ILE);
- bool Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base,
+ bool Build(ConstExprEmitter *Emitter, llvm::Constant *Base,
InitListExpr *Updater);
bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
@@ -566,7 +566,7 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater,
QualType ValTy) {
ConstStructBuilder Builder(Emitter);
@@ -723,6 +723,10 @@ public:
return nullptr;
}
+ llvm::Constant *VisitConstantExpr(ConstantExpr *CE, QualType T) {
+ return Visit(CE->getSubExpr(), T);
+ }
+
llvm::Constant *VisitParenExpr(ParenExpr *PE, QualType T) {
return Visit(PE->getSubExpr(), T);
}
@@ -869,8 +873,9 @@ public:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_ZeroToOCLOpaqueType:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
@@ -1026,8 +1031,8 @@ public:
}
if (destType->isRecordType())
- return ConstStructBuilder::BuildStruct(Emitter, this,
- dyn_cast<llvm::ConstantStruct>(Base), Updater, destType);
+ return ConstStructBuilder::BuildStruct(Emitter, this, Base, Updater,
+ destType);
return nullptr;
}
@@ -1102,7 +1107,7 @@ public:
} // end anonymous namespace.
bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
- llvm::ConstantStruct *Base,
+ llvm::Constant *Base,
InitListExpr *Updater) {
assert(Base && "base expression should not be empty");
@@ -1110,7 +1115,7 @@ bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
RecordDecl *RD = ExprType->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
const llvm::StructLayout *BaseLayout = CGM.getDataLayout().getStructLayout(
- Base->getType());
+ cast<llvm::StructType>(Base->getType()));
unsigned FieldNo = -1;
unsigned ElementNo = 0;
@@ -1131,7 +1136,7 @@ bool ConstStructBuilder::Build(ConstExprEmitter *ExprEmitter,
if (Field->isUnnamedBitfield())
continue;
- llvm::Constant *EltInit = Base->getOperand(ElementNo);
+ llvm::Constant *EltInit = Base->getAggregateElement(ElementNo);
// Bail out if the type of the ConstantStruct does not have the same layout
// as the type of the InitListExpr.
@@ -1450,6 +1455,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (CD->isTrivial() && CD->isDefaultConstructor())
return CGM.EmitNullConstant(D.getType());
}
+ InConstantContext = true;
}
QualType destType = D.getType();
@@ -1547,7 +1553,7 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
if (destType->isReferenceType())
Success = E->EvaluateAsLValue(Result, CGM.getContext());
else
- Success = E->EvaluateAsRValue(Result, CGM.getContext());
+ Success = E->EvaluateAsRValue(Result, CGM.getContext(), InConstantContext);
llvm::Constant *C;
if (Success && !Result.HasSideEffects)
@@ -1600,6 +1606,7 @@ private:
ConstantLValue tryEmitBase(const APValue::LValueBase &base);
ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
+ ConstantLValue VisitConstantExpr(const ConstantExpr *E);
ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
ConstantLValue VisitStringLiteral(const StringLiteral *E);
ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
@@ -1755,6 +1762,11 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
ConstantLValue
+ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+}
+
+ConstantLValue
ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
return tryEmitGlobalCompoundLiteral(CGM, Emitter.CGF, E);
}
@@ -1782,7 +1794,7 @@ ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
return cast<ConstantAddress>(Res.getAddress());
}
- auto kind = E->getIdentType();
+ auto kind = E->getIdentKind();
if (kind == PredefinedExpr::PrettyFunction) {
return CGM.GetAddrOfConstantCString("top level", ".tmp");
}
@@ -1968,6 +1980,16 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
Elts.push_back(C);
}
+ // Reaching this point means the array type is not a ConstantArrayType
+ // (it is probably an incomplete array type), so emit a zero initializer
+ // of the appropriate shape instead.
+ if (CAT == nullptr && CommonElementType == nullptr && !NumInitElts) {
+ const ArrayType *AT = CGM.getContext().getAsArrayType(DestType);
+ CommonElementType = CGM.getTypes().ConvertType(AT->getElementType());
+ llvm::ArrayType *AType = llvm::ArrayType::get(CommonElementType,
+ NumElements);
+ return llvm::ConstantAggregateZero::get(AType);
+ }
+
return EmitArrayConstant(CGM, CAT, CommonElementType, NumElements, Elts,
Filler);
}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c62588c68272..1c14d4c99a23 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -11,11 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
-#include "CGCleanup.h"
#include "CGCXXABI.h"
+#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
@@ -23,8 +23,9 @@
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
@@ -257,8 +258,11 @@ public:
AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
} else {
// Assumptions for function parameters are emitted at the start of the
- // function, so there is no need to repeat that here.
- if (isa<ParmVarDecl>(VD))
+ // function, so there is no need to repeat that here, unless the
+ // alignment-assumption sanitizer is enabled; in that case we prefer the
+ // assumption over the alignment attribute on the IR function parameter.
+ if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
return;
AVAttr = VD->getAttr<AlignValueAttr>();
@@ -275,7 +279,8 @@ public:
Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
- CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
+ CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(),
+ AlignmentCI->getZExtValue());
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
@@ -302,7 +307,11 @@ public:
/// Known implicit conversion check kinds.
/// Keep in sync with the enum of the same name in ubsan_handlers.h
enum ImplicitConversionCheckKind : unsigned char {
- ICCK_IntegerTruncation = 0,
+ ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
+ ICCK_UnsignedIntegerTruncation = 1,
+ ICCK_SignedIntegerTruncation = 2,
+ ICCK_IntegerSignChange = 3,
+ ICCK_SignedIntegerTruncationOrSignChange = 4,
};
/// Emit a check that an [implicit] truncation of an integer does not
@@ -310,21 +319,39 @@ public:
void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
QualType DstType, SourceLocation Loc);
+ /// Emit a check that an [implicit] conversion of an integer does not change
+ /// the sign of the value. It is not UB, so we use the value after conversion.
+ /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
+ void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, SourceLocation Loc);
+
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
struct ScalarConversionOpts {
bool TreatBooleanAsSigned;
bool EmitImplicitIntegerTruncationChecks;
+ bool EmitImplicitIntegerSignChangeChecks;
ScalarConversionOpts()
: TreatBooleanAsSigned(false),
- EmitImplicitIntegerTruncationChecks(false) {}
+ EmitImplicitIntegerTruncationChecks(false),
+ EmitImplicitIntegerSignChangeChecks(false) {}
+
+ ScalarConversionOpts(clang::SanitizerSet SanOpts)
+ : TreatBooleanAsSigned(false),
+ EmitImplicitIntegerTruncationChecks(
+ SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
+ EmitImplicitIntegerSignChangeChecks(
+ SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
};
Value *
EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
SourceLocation Loc,
ScalarConversionOpts Opts = ScalarConversionOpts());
+ Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
+ SourceLocation Loc);
+
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
@@ -382,6 +409,9 @@ public:
}
Value *VisitExpr(Expr *S);
+ Value *VisitConstantExpr(ConstantExpr *E) {
+ return Visit(E->getSubExpr());
+ }
Value *VisitParenExpr(ParenExpr *PE) {
return Visit(PE->getSubExpr());
}
@@ -450,19 +480,10 @@ public:
return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
}
- Value *emitConstant(const CodeGenFunction::ConstantEmission &Constant,
- Expr *E) {
- assert(Constant && "not a constant");
- if (Constant.isReference())
- return EmitLoadOfLValue(Constant.getReferenceLValue(CGF, E),
- E->getExprLoc());
- return Constant.getValue();
- }
-
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
- return emitConstant(Constant, E);
+ return CGF.emitScalarConstant(Constant, E);
return EmitLoadOfLValue(E);
}
@@ -664,7 +685,7 @@ public:
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), Ops))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -941,48 +962,233 @@ void ScalarExprEmitter::EmitFloatConversionCheck(
SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the truncation Src -> Dst was lossy.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+ (void)DstTy; // Only used in assert()
+
+ // This should be truncation of integral types.
+ assert(Src != Dst);
+ assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
+ assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
+ "non-integer llvm type");
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+
+ // If both (src and dst) types are unsigned, then it's an unsigned truncation.
+ // Else, it is a signed truncation.
+ ScalarExprEmitter::ImplicitConversionCheckKind Kind;
+ SanitizerMask Mask;
+ if (!SrcSigned && !DstSigned) {
+ Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
+ Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
+ } else {
+ Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
+ Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
+ }
+
+ llvm::Value *Check = nullptr;
+ // 1. Extend the truncated value back to the same width as the Src.
+ Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
+ // 2. Equality-compare with the original source value
+ Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+ return std::make_pair(Kind, std::make_pair(Check, Mask));
+}
+
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
Value *Dst, QualType DstType,
SourceLocation Loc) {
- if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation))
+ if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
return;
- llvm::Type *SrcTy = Src->getType();
- llvm::Type *DstTy = Dst->getType();
-
// We only care about int->int conversions here.
// We ignore conversions to/from pointer and/or bool.
if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
return;
- assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
- "clang integer type lowered to non-integer llvm type");
-
- unsigned SrcBits = SrcTy->getScalarSizeInBits();
- unsigned DstBits = DstTy->getScalarSizeInBits();
+ unsigned SrcBits = Src->getType()->getScalarSizeInBits();
+ unsigned DstBits = Dst->getType()->getScalarSizeInBits();
// This must be truncation. Else we do not care.
if (SrcBits <= DstBits)
return;
assert(!DstType->isBooleanType() && "we should not get here with booleans.");
+ // If the integer sign change sanitizer is enabled,
+ // and we are truncating from larger unsigned type to smaller signed type,
+ // let that next sanitizer deal with it.
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
+ (!SrcSigned && DstSigned))
+ return;
+
CodeGenFunction::SanitizerScope SanScope(&CGF);
+ std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+ Check =
+ EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+
+ // Do we care about this type of truncation?
+ if (!CGF.SanOpts.has(Check.second.second))
+ return;
+
+ llvm::Constant *StaticArgs[] = {
+ CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
+ CGF.EmitCheckTypeDescriptor(DstType),
+ llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
+ CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
+ {Src, Dst});
+}
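
For context, a minimal example of code that the truncation check above diagnoses at runtime (not part of this patch; the file name and values are illustrative):

  // trunc-example.cpp
  // clang++ -fsanitize=implicit-integer-truncation trunc-example.cpp
  #include <cstdio>
  int main() {
    unsigned int Wide = 300;     // does not fit into 8 bits
    unsigned char Narrow = Wide; // implicit unsigned truncation: 300 -> 44, reported at runtime
    std::printf("%u\n", static_cast<unsigned>(Narrow));
  }

The emitted check re-extends Narrow to the width of Wide and compares it with the original value; 44 != 300, so the handler fires.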
+
+// Should be called within CodeGenFunction::SanitizerScope RAII scope.
+// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
+static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
+ QualType DstType, CGBuilderTy &Builder) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+
+ assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
+ "non-integer llvm type");
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ (void)SrcSigned; // Only used in assert()
+ (void)DstSigned; // Only used in assert()
+ unsigned SrcBits = SrcTy->getScalarSizeInBits();
+ unsigned DstBits = DstTy->getScalarSizeInBits();
+ (void)SrcBits; // Only used in assert()
+ (void)DstBits; // Only used in assert()
+
+ assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
+ "either the widths should be different, or the signednesses.");
+
+ // NOTE: zero value is considered to be non-negative.
+ auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
+ const char *Name) -> Value * {
+ // Is this value a signed type?
+ bool VSigned = VType->isSignedIntegerOrEnumerationType();
+ llvm::Type *VTy = V->getType();
+ if (!VSigned) {
+ // If the value is unsigned, then it is never negative.
+ // FIXME: can we encounter non-scalar VTy here?
+ return llvm::ConstantInt::getFalse(VTy->getContext());
+ }
+ // Get the zero of the same type with which we will be comparing.
+ llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
+ // %V.isnegative = icmp slt %V, 0
+ // I.e. is %V *strictly* less than zero; does it have a negative value?
+ return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
+ llvm::Twine(Name) + "." + V->getName() +
+ ".negativitycheck");
+ };
+
+ // 1. Was the old Value negative?
+ llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
+ // 2. Is the new Value negative?
+ llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
+ // 3. Now, was the 'negativity status' preserved during the conversion?
+ // NOTE: conversion from negative to zero is considered to change the sign.
+ // (We want to get 'false' when the conversion changed the sign)
+ // So we should just equality-compare the negativity statuses.
llvm::Value *Check = nullptr;
+ Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
+ // If the comparison result is 'false', then the conversion changed the sign.
+ return std::make_pair(
+ ScalarExprEmitter::ICCK_IntegerSignChange,
+ std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
+}
- // 1. Extend the truncated value back to the same width as the Src.
- bool InputSigned = DstType->isSignedIntegerOrEnumerationType();
- Check = Builder.CreateIntCast(Dst, SrcTy, InputSigned, "anyext");
- // 2. Equality-compare with the original source value
- Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
- // If the comparison result is 'i1 false', then the truncation was lossy.
+void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
+ Value *Dst, QualType DstType,
+ SourceLocation Loc) {
+ if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
+ return;
+
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = Dst->getType();
+
+ // We only care about int->int conversions here.
+ // We ignore conversions to/from pointer and/or bool.
+ if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
+ return;
+
+ bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
+ bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
+ unsigned SrcBits = SrcTy->getScalarSizeInBits();
+ unsigned DstBits = DstTy->getScalarSizeInBits();
+
+ // Now, we do not need to emit the check in *all* of the cases.
+ // We can avoid emitting it in some obvious cases where the optimizer
+ // (instcombine) would always have dropped it anyway.
+ // If it's a cast between effectively the same type, no check.
+ // NOTE: this is *not* equivalent to checking the canonical types.
+ if (SrcSigned == DstSigned && SrcBits == DstBits)
+ return;
+ // At least one of the values needs to have signed type.
+ // If both are unsigned, then obviously, neither of them can be negative.
+ if (!SrcSigned && !DstSigned)
+ return;
+ // If the conversion is to *larger* *signed* type, then no check is needed.
+ // Because either sign-extension happens (so the sign will remain),
+ // or zero-extension will happen (the sign bit will be zero.)
+ if ((DstBits > SrcBits) && DstSigned)
+ return;
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
+ (SrcBits > DstBits) && SrcSigned) {
+ // If the signed integer truncation sanitizer is enabled,
+ // and this is a truncation from signed type, then no check is needed.
+ // Because here sign change check is interchangeable with truncation check.
+ return;
+ }
+ // That's it. We can't rule out any more cases with the data we have.
+
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
+ std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
+ std::pair<llvm::Value *, SanitizerMask>>
+ Check;
+
+ // Each of these checks needs to return 'false' when an issue was detected.
+ ImplicitConversionCheckKind CheckKind;
+ llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
+ // So we can 'and' all the checks together, and still get 'false',
+ // if at least one of the checks detected an issue.
+
+ Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ CheckKind = Check.first;
+ Checks.emplace_back(Check.second);
+
+ if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
+ (SrcBits > DstBits) && !SrcSigned && DstSigned) {
+ // If the signed integer truncation sanitizer was enabled,
+ // and we are truncating from larger unsigned type to smaller signed type,
+ // let's handle the case we skipped in that check.
+ Check =
+ EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
+ CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
+ Checks.emplace_back(Check.second);
+ // If the comparison result is 'i1 false', then the truncation was lossy.
+ }
llvm::Constant *StaticArgs[] = {
CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
CGF.EmitCheckTypeDescriptor(DstType),
- llvm::ConstantInt::get(Builder.getInt8Ty(), ICCK_IntegerTruncation)};
- CGF.EmitCheck(std::make_pair(Check, SanitizerKind::ImplicitIntegerTruncation),
- SanitizerHandler::ImplicitConversion, StaticArgs, {Src, Dst});
+ llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
+ // EmitCheck() will 'and' all the checks together.
+ CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
+ {Src, Dst});
}
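
Likewise, a minimal illustration of a conversion that the new sign-change check reports under -fsanitize=implicit-integer-sign-change (not from the patch; names are invented):

  // signchange-example.cpp
  // clang++ -fsanitize=implicit-integer-sign-change signchange-example.cpp
  int main(int argc, char **) {
    int Negative = -argc;               // negative for any normal invocation
    unsigned int AsUnsigned = Negative; // negative -> non-negative: sign change, reported at runtime
    return AsUnsigned == 0u;
  }

Both sides are 32 bits wide here, so none of the early-outs above apply; only the signedness differs, which is exactly the case this check instruments.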
/// Emit a conversion from the specified type to the specified destination type,
@@ -991,6 +1197,27 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
QualType DstType,
SourceLocation Loc,
ScalarConversionOpts Opts) {
+ // All conversions involving fixed point types should be handled by the
+ // EmitFixedPoint family functions. This is done to prevent bloating up this
+ // function more, and although fixed point numbers are represented by
+ // integers, we do not want to follow any logic that assumes they should be
+ // treated as integers.
+ // TODO(leonardchan): When necessary, add another if statement checking for
+ // conversions to fixed point types from other types.
+ if (SrcType->isFixedPointType()) {
+ if (DstType->isFixedPointType()) {
+ return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
+ } else if (DstType->isBooleanType()) {
+ // We do not need to check the padding bit on unsigned types if unsigned
+ // padding is enabled because overflow into this bit is undefined
+ // behavior.
+ return Builder.CreateIsNotNull(Src, "tobool");
+ }
+
+ llvm_unreachable(
+ "Unhandled scalar conversion involving a fixed point type.");
+ }
+
QualType NoncanonicalSrcType = SrcType;
QualType NoncanonicalDstType = DstType;
@@ -1036,8 +1263,13 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
}
// Ignore conversions like int -> uint.
- if (SrcTy == DstTy)
+ if (SrcTy == DstTy) {
+ if (Opts.EmitImplicitIntegerSignChangeChecks)
+ EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
+ NoncanonicalDstType, Loc);
+
return Src;
+ }
// Handle pointer conversions next: pointers can only be converted to/from
// other pointers and integers. Check for pointer types in terms of LLVM, as
@@ -1181,9 +1413,91 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
NoncanonicalDstType, Loc);
+ if (Opts.EmitImplicitIntegerSignChangeChecks)
+ EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
+ NoncanonicalDstType, Loc);
+
return Res;
}
+Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
+ QualType DstTy,
+ SourceLocation Loc) {
+ using llvm::APInt;
+ using llvm::ConstantInt;
+ using llvm::Value;
+
+ assert(SrcTy->isFixedPointType());
+ assert(DstTy->isFixedPointType());
+
+ FixedPointSemantics SrcFPSema =
+ CGF.getContext().getFixedPointSemantics(SrcTy);
+ FixedPointSemantics DstFPSema =
+ CGF.getContext().getFixedPointSemantics(DstTy);
+ unsigned SrcWidth = SrcFPSema.getWidth();
+ unsigned DstWidth = DstFPSema.getWidth();
+ unsigned SrcScale = SrcFPSema.getScale();
+ unsigned DstScale = DstFPSema.getScale();
+ bool SrcIsSigned = SrcFPSema.isSigned();
+ bool DstIsSigned = DstFPSema.isSigned();
+
+ llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
+
+ Value *Result = Src;
+ unsigned ResultWidth = SrcWidth;
+
+ if (!DstFPSema.isSaturated()) {
+ // Downscale.
+ if (DstScale < SrcScale)
+ Result = SrcIsSigned ?
+ Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
+ Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+
+ // Resize.
+ Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+
+ // Upscale.
+ if (DstScale > SrcScale)
+ Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
+ } else {
+ // Adjust the number of fractional bits.
+ if (DstScale > SrcScale) {
+ ResultWidth = SrcWidth + DstScale - SrcScale;
+ llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
+ Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
+ Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
+ } else if (DstScale < SrcScale) {
+ Result = SrcIsSigned ?
+ Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
+ Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
+ }
+
+ // Handle saturation.
+ bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
+ if (LessIntBits) {
+ Value *Max = ConstantInt::get(
+ CGF.getLLVMContext(),
+ APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
+ : Builder.CreateICmpUGT(Result, Max);
+ Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
+ }
+ // Cannot overflow min to dest type if src is unsigned since all fixed
+ // point types can cover the unsigned min of 0.
+ if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
+ Value *Min = ConstantInt::get(
+ CGF.getLLVMContext(),
+ APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooLow = Builder.CreateICmpSLT(Result, Min);
+ Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
+ }
+
+ // Resize the integer part to get the final destination size.
+ Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+ }
+ return Result;
+}
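
For intuition, the non-saturating path above is plain shift-and-resize arithmetic on the underlying integer container. A standalone sketch with ordinary integers (an illustration under assumed widths and scales, not code from this patch):

  #include <cstdint>
  // Re-scale a signed fixed-point payload from SrcScale to DstScale fractional
  // bits. The widening to int64_t stands in for the IntCast to the destination
  // width; the shifts mirror the downscale / upscale steps above.
  int64_t rescaleFixedPoint(int32_t Src, unsigned SrcScale, unsigned DstScale) {
    int64_t Result = Src;
    if (DstScale < SrcScale)
      Result >>= (SrcScale - DstScale); // downscale: drop fractional bits
    else if (DstScale > SrcScale)
      Result <<= (DstScale - SrcScale); // upscale: add fractional bits
    return Result;
  }

For example, 1.5 stored with 15 fractional bits is 49152; rescaling to 7 fractional bits yields 49152 >> 8 == 192, i.e. 1.5 at the new scale.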
+
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
@@ -1405,10 +1719,11 @@ Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
CGF.EmitIgnoredExpr(E->getBase());
- return emitConstant(Constant, E);
+ return CGF.emitScalarConstant(Constant, E);
} else {
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Value = Result.Val.getInt();
CGF.EmitIgnoredExpr(E->getBase());
return Builder.getInt(Value);
}
@@ -1681,7 +1996,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getLocStart());
+ CE->getBeginLoc());
}
if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
@@ -1745,11 +2060,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Derived.getPointer(), DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
- CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(),
- Derived.getPointer(),
- /*MayBeNull=*/true,
- CodeGenFunction::CFITCK_DerivedCast,
- CE->getLocStart());
+ CGF.EmitVTablePtrCheckForCast(
+ DestTy->getPointeeType(), Derived.getPointer(),
+ /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
+ CE->getBeginLoc());
return Derived.getPointer();
}
@@ -1875,11 +2189,22 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
+ case CK_FixedPointCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
+ case CK_FixedPointToBoolean:
+ assert(E->getType()->isFixedPointType() &&
+ "Expected src type to be fixed point type");
+ assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+
case CK_IntegralCast: {
ScalarConversionOpts Opts;
- if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerTruncation)) {
- if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE))
- Opts.EmitImplicitIntegerTruncationChecks = !ICE->isPartOfExplicitCast();
+ if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ if (!ICE->isPartOfExplicitCast())
+ Opts = ScalarConversionOpts(CGF.SanOpts);
}
return EmitScalarConversion(Visit(E), E->getType(), DestTy,
CE->getExprLoc(), Opts);
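
One practical consequence of the isPartOfExplicitCast guard above (illustration, not from the patch; function names are made up): only genuinely implicit integral conversions are instrumented, while an explicit cast opts out of the checks:

  // Built with -fsanitize=implicit-integer-truncation:
  unsigned char implicitNarrow(int Wide) {
    return Wide;                                 // implicit conversion: checked
  }
  unsigned char explicitNarrow(int Wide) {
    return static_cast<unsigned char>(Wide);     // part of an explicit cast: not checked
  }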
@@ -1920,13 +2245,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CE->getExprLoc());
}
- case CK_ZeroToOCLEvent: {
- assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
- return llvm::Constant::getNullValue(ConvertType(DestTy));
- }
-
- case CK_ZeroToOCLQueue: {
- assert(DestTy->isQueueT() && "CK_ZeroToOCLQueue cast on non queue_t type");
+ case CK_ZeroToOCLOpaqueType: {
+ assert((DestTy->isEventT() || DestTy->isQueueT() ||
+ DestTy->isOCLIntelSubgroupAVCType()) &&
+ "CK_ZeroToOCLEvent cast on non-event type");
return llvm::Constant::getNullValue(ConvertType(DestTy));
}
@@ -1985,7 +2307,7 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, Amount, Name);
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
@@ -2280,9 +2602,11 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Try folding the offsetof to a constant.
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, CGF.getContext()))
+ Expr::EvalResult EVResult;
+ if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
+ llvm::APSInt Value = EVResult.Val.getInt();
return Builder.getInt(Value);
+ }
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
@@ -2551,9 +2875,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
- // Convert the result back to the LHS type.
- Result =
- EmitScalarConversion(Result, E->getComputationResultType(), LHSTy, Loc);
+ // Convert the result back to the LHS type,
+ // potentially with Implicit Conversion sanitizer check.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
+ Loc, ScalarConversionOpts(CGF.SanOpts));
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
@@ -2991,7 +3316,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
@@ -3026,7 +3351,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
- // Fall through.
+ LLVM_FALLTHROUGH;
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index 21e2b8dd8c31..fd0a9c773a2e 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -10,8 +10,8 @@
#include "CGLoopInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
-#include "clang/Sema/LoopHint.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
@@ -21,14 +21,17 @@ using namespace llvm;
static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
const llvm::DebugLoc &StartLoc,
- const llvm::DebugLoc &EndLoc) {
+ const llvm::DebugLoc &EndLoc, MDNode *&AccGroup) {
if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
+ Attrs.UnrollAndJamCount == 0 && !Attrs.PipelineDisabled &&
+ Attrs.PipelineInitiationInterval == 0 &&
Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
Attrs.UnrollEnable == LoopAttributes::Unspecified &&
- Attrs.DistributeEnable == LoopAttributes::Unspecified &&
- !StartLoc && !EndLoc)
+ Attrs.UnrollAndJamEnable == LoopAttributes::Unspecified &&
+ Attrs.DistributeEnable == LoopAttributes::Unspecified && !StartLoc &&
+ !EndLoc)
return nullptr;
SmallVector<Metadata *, 4> Args;
@@ -61,7 +64,7 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
Args.push_back(MDNode::get(Ctx, Vals));
}
- // Setting interleave.count
+ // Setting unroll.count
if (Attrs.UnrollCount > 0) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.count"),
ConstantAsMetadata::get(ConstantInt::get(
@@ -69,6 +72,14 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
Args.push_back(MDNode::get(Ctx, Vals));
}
+ // Setting unroll_and_jam.count
+ if (Attrs.UnrollAndJamCount > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll_and_jam.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.UnrollAndJamCount))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
// Setting vectorize.enable
if (Attrs.VectorizeEnable != LoopAttributes::Unspecified) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.enable"),
@@ -91,6 +102,19 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
Args.push_back(MDNode::get(Ctx, Vals));
}
+ // Setting unroll_and_jam.enable, unroll_and_jam.full or unroll_and_jam.disable
+ if (Attrs.UnrollAndJamEnable != LoopAttributes::Unspecified) {
+ std::string Name;
+ if (Attrs.UnrollAndJamEnable == LoopAttributes::Enable)
+ Name = "llvm.loop.unroll_and_jam.enable";
+ else if (Attrs.UnrollAndJamEnable == LoopAttributes::Full)
+ Name = "llvm.loop.unroll_and_jam.full";
+ else
+ Name = "llvm.loop.unroll_and_jam.disable";
+ Metadata *Vals[] = {MDString::get(Ctx, Name)};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
if (Attrs.DistributeEnable != LoopAttributes::Unspecified) {
Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.distribute.enable"),
ConstantAsMetadata::get(ConstantInt::get(
@@ -99,6 +123,28 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
Args.push_back(MDNode::get(Ctx, Vals));
}
+ if (Attrs.IsParallel) {
+ AccGroup = MDNode::getDistinct(Ctx, {});
+ Args.push_back(MDNode::get(
+ Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
+ }
+
+ if (Attrs.PipelineDisabled) {
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.pipeline.disable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt1Ty(Ctx), (Attrs.PipelineDisabled == true)))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
+ if (Attrs.PipelineInitiationInterval > 0) {
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.pipeline.initiationinterval"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.PipelineInitiationInterval))};
+ Args.push_back(MDNode::get(Ctx, Vals));
+ }
+
// Set the first operand to itself.
MDNode *LoopID = MDNode::get(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
@@ -107,24 +153,31 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
LoopAttributes::LoopAttributes(bool IsParallel)
: IsParallel(IsParallel), VectorizeEnable(LoopAttributes::Unspecified),
- UnrollEnable(LoopAttributes::Unspecified), VectorizeWidth(0),
- InterleaveCount(0), UnrollCount(0),
- DistributeEnable(LoopAttributes::Unspecified) {}
+ UnrollEnable(LoopAttributes::Unspecified),
+ UnrollAndJamEnable(LoopAttributes::Unspecified), VectorizeWidth(0),
+ InterleaveCount(0), UnrollCount(0), UnrollAndJamCount(0),
+ DistributeEnable(LoopAttributes::Unspecified), PipelineDisabled(false),
+ PipelineInitiationInterval(0) {}
void LoopAttributes::clear() {
IsParallel = false;
VectorizeWidth = 0;
InterleaveCount = 0;
UnrollCount = 0;
+ UnrollAndJamCount = 0;
VectorizeEnable = LoopAttributes::Unspecified;
UnrollEnable = LoopAttributes::Unspecified;
+ UnrollAndJamEnable = LoopAttributes::Unspecified;
DistributeEnable = LoopAttributes::Unspecified;
+ PipelineDisabled = false;
+ PipelineInitiationInterval = 0;
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
: LoopID(nullptr), Header(Header), Attrs(Attrs) {
- LoopID = createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc);
+ LoopID =
+ createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc, AccGroup);
}
void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
@@ -191,12 +244,20 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::Unroll:
setUnrollState(LoopAttributes::Disable);
break;
+ case LoopHintAttr::UnrollAndJam:
+ setUnrollAndJamState(LoopAttributes::Disable);
+ break;
case LoopHintAttr::Distribute:
setDistributeState(false);
break;
+ case LoopHintAttr::PipelineDisabled:
+ setPipelineDisabled(true);
+ break;
case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::UnrollAndJamCount:
case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::InterleaveCount:
+ case LoopHintAttr::PipelineInitiationInterval:
llvm_unreachable("Options cannot be disabled.");
break;
}
@@ -210,12 +271,18 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::Unroll:
setUnrollState(LoopAttributes::Enable);
break;
+ case LoopHintAttr::UnrollAndJam:
+ setUnrollAndJamState(LoopAttributes::Enable);
+ break;
case LoopHintAttr::Distribute:
setDistributeState(true);
break;
case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::UnrollAndJamCount:
case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::InterleaveCount:
+ case LoopHintAttr::PipelineDisabled:
+ case LoopHintAttr::PipelineInitiationInterval:
llvm_unreachable("Options cannot enabled.");
break;
}
@@ -229,10 +296,14 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
setVectorizeEnable(true);
break;
case LoopHintAttr::Unroll:
+ case LoopHintAttr::UnrollAndJam:
case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::UnrollAndJamCount:
case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::InterleaveCount:
case LoopHintAttr::Distribute:
+ case LoopHintAttr::PipelineDisabled:
+ case LoopHintAttr::PipelineInitiationInterval:
llvm_unreachable("Options cannot be used to assume mem safety.");
break;
}
@@ -242,12 +313,18 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::Unroll:
setUnrollState(LoopAttributes::Full);
break;
+ case LoopHintAttr::UnrollAndJam:
+ setUnrollAndJamState(LoopAttributes::Full);
+ break;
case LoopHintAttr::Vectorize:
case LoopHintAttr::Interleave:
case LoopHintAttr::UnrollCount:
+ case LoopHintAttr::UnrollAndJamCount:
case LoopHintAttr::VectorizeWidth:
case LoopHintAttr::InterleaveCount:
case LoopHintAttr::Distribute:
+ case LoopHintAttr::PipelineDisabled:
+ case LoopHintAttr::PipelineInitiationInterval:
llvm_unreachable("Options cannot be used with 'full' hint.");
break;
}
@@ -263,10 +340,18 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
case LoopHintAttr::UnrollCount:
setUnrollCount(ValueInt);
break;
+ case LoopHintAttr::UnrollAndJamCount:
+ setUnrollAndJamCount(ValueInt);
+ break;
+ case LoopHintAttr::PipelineInitiationInterval:
+ setPipelineInitiationInterval(ValueInt);
+ break;
case LoopHintAttr::Unroll:
+ case LoopHintAttr::UnrollAndJam:
case LoopHintAttr::Vectorize:
case LoopHintAttr::Interleave:
case LoopHintAttr::Distribute:
+ case LoopHintAttr::PipelineDisabled:
llvm_unreachable("Options cannot be assigned a value.");
break;
}
@@ -284,6 +369,21 @@ void LoopInfoStack::pop() {
}
void LoopInfoStack::InsertHelper(Instruction *I) const {
+ if (I->mayReadOrWriteMemory()) {
+ SmallVector<Metadata *, 4> AccessGroups;
+ for (const LoopInfo &AL : Active) {
+ // Here we assume that every loop that has an access group is parallel.
+ if (MDNode *Group = AL.getAccessGroup())
+ AccessGroups.push_back(Group);
+ }
+ MDNode *UnionMD = nullptr;
+ if (AccessGroups.size() == 1)
+ UnionMD = cast<MDNode>(AccessGroups[0]);
+ else if (AccessGroups.size() >= 2)
+ UnionMD = MDNode::get(I->getContext(), AccessGroups);
+ I->setMetadata("llvm.access.group", UnionMD);
+ }
+
if (!hasInfo())
return;
@@ -291,15 +391,12 @@ void LoopInfoStack::InsertHelper(Instruction *I) const {
if (!L.getLoopID())
return;
- if (TerminatorInst *TI = dyn_cast<TerminatorInst>(I)) {
- for (unsigned i = 0, ie = TI->getNumSuccessors(); i < ie; ++i)
- if (TI->getSuccessor(i) == L.getHeader()) {
- TI->setMetadata(llvm::LLVMContext::MD_loop, L.getLoopID());
+ if (I->isTerminator()) {
+ for (BasicBlock *Succ : successors(I))
+ if (Succ == L.getHeader()) {
+ I->setMetadata(llvm::LLVMContext::MD_loop, L.getLoopID());
break;
}
return;
}
-
- if (L.getAttributes().IsParallel && I->mayReadOrWriteMemory())
- I->setMetadata("llvm.mem.parallel_loop_access", L.getLoopID());
}
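
For reference, a sketch of the source-level loop hints that feed the new LoopHintAttr cases handled above (illustrative only; the loop body is made up and the pragma spellings are the clang loop-hint spellings assumed to correspond to these options):

  void scale2d(float *Out, const float *In, float A, int Rows, int Cols) {
  #pragma clang loop unroll_and_jam(enable)
    for (int I = 0; I < Rows; ++I) {
  #pragma clang loop pipeline_initiation_interval(2)
      for (int J = 0; J < Cols; ++J)
        Out[I * Cols + J] = A * In[I * Cols + J];
    }
  }

The outer pragma lowers to llvm.loop.unroll_and_jam.* metadata via setUnrollAndJamState, and the inner one to llvm.loop.pipeline.initiationinterval via setPipelineInitiationInterval.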
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 9d5f23ff9a2a..84ba03bfb00b 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -49,6 +49,9 @@ struct LoopAttributes {
/// Value for llvm.loop.unroll.* metadata (enable, disable, or full).
LVEnableState UnrollEnable;
+ /// Value for llvm.loop.unroll_and_jam.* metadata (enable, disable, or full).
+ LVEnableState UnrollAndJamEnable;
+
/// Value for llvm.loop.vectorize.width metadata.
unsigned VectorizeWidth;
@@ -58,8 +61,17 @@ struct LoopAttributes {
/// llvm.unroll.
unsigned UnrollCount;
+ /// Value for llvm.loop.unroll_and_jam.count metadata.
+ unsigned UnrollAndJamCount;
+
/// Value for llvm.loop.distribute.enable metadata.
LVEnableState DistributeEnable;
+
+ /// Value for llvm.loop.pipeline.disable metadata.
+ bool PipelineDisabled;
+
+ /// Value for llvm.loop.pipeline.initiationinterval metadata.
+ unsigned PipelineInitiationInterval;
};
/// Information used when generating a structured loop.
@@ -78,6 +90,9 @@ public:
/// Get the set of attributes active for this loop.
const LoopAttributes &getAttributes() const { return Attrs; }
+ /// Return this loop's access group or nullptr if it does not have one.
+ llvm::MDNode *getAccessGroup() const { return AccGroup; }
+
private:
/// Loop ID metadata.
llvm::MDNode *LoopID;
@@ -85,6 +100,8 @@ private:
llvm::BasicBlock *Header;
/// The attributes for this loop.
LoopAttributes Attrs;
+ /// The access group for memory accesses parallel to this loop.
+ llvm::MDNode *AccGroup = nullptr;
};
/// A stack of loop information corresponding to loop nesting levels.
@@ -143,6 +160,11 @@ public:
StagedAttrs.UnrollEnable = State;
}
+ /// Set the next pushed loop unroll_and_jam state.
+ void setUnrollAndJamState(const LoopAttributes::LVEnableState &State) {
+ StagedAttrs.UnrollAndJamEnable = State;
+ }
+
/// Set the vectorize width for the next loop pushed.
void setVectorizeWidth(unsigned W) { StagedAttrs.VectorizeWidth = W; }
@@ -152,6 +174,17 @@ public:
/// Set the unroll count for the next loop pushed.
void setUnrollCount(unsigned C) { StagedAttrs.UnrollCount = C; }
+ /// Set the unroll_and_jam count for the next loop pushed.
+ void setUnrollAndJamCount(unsigned C) { StagedAttrs.UnrollAndJamCount = C; }
+
+ /// Set the pipeline disabled state.
+ void setPipelineDisabled(bool S) { StagedAttrs.PipelineDisabled = S; }
+
+ /// Set the pipeline initiation interval.
+ void setPipelineInitiationInterval(unsigned C) {
+ StagedAttrs.PipelineInitiationInterval = C;
+ }
+
private:
/// Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
diff --git a/lib/CodeGen/CGNonTrivialStruct.cpp b/lib/CodeGen/CGNonTrivialStruct.cpp
index 922e0934b866..c6a96a912622 100644
--- a/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -187,6 +187,7 @@ template <class Derived> struct GenFuncNameBase {
if (!FK)
return asDerived().visitTrivial(QualType(AT, 0), FD, CurStructOffset);
+ asDerived().flushTrivialFields();
CharUnits FieldOffset = CurStructOffset + asDerived().getFieldOffset(FD);
ASTContext &Ctx = asDerived().getContext();
const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
@@ -283,8 +284,9 @@ struct GenDefaultInitializeFuncName
struct GenDestructorFuncName : GenUnaryFuncName<GenDestructorFuncName>,
DestructedTypeVisitor<GenDestructorFuncName> {
using Super = DestructedTypeVisitor<GenDestructorFuncName>;
- GenDestructorFuncName(CharUnits DstAlignment, ASTContext &Ctx)
- : GenUnaryFuncName<GenDestructorFuncName>("__destructor_", DstAlignment,
+ GenDestructorFuncName(const char *Prefix, CharUnits DstAlignment,
+ ASTContext &Ctx)
+ : GenUnaryFuncName<GenDestructorFuncName>(Prefix, DstAlignment,
Ctx) {}
void visitWithKind(QualType::DestructionKind DK, QualType FT,
const FieldDecl *FD, CharUnits CurStructOffset) {
@@ -335,6 +337,7 @@ template <class Derived> struct GenFuncBase {
return asDerived().visitTrivial(QualType(AT, 0), FD, CurStackOffset,
Addrs);
+ asDerived().flushTrivialFields(Addrs);
CodeGenFunction &CGF = *this->CGF;
ASTContext &Ctx = CGF.getContext();
@@ -455,12 +458,13 @@ template <class Derived> struct GenFuncBase {
llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
F->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(nullptr, FI, F);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
IdentifierInfo *II = &Ctx.Idents.get(FuncName);
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- II, Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
+ II, Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {}), nullptr,
+ SC_PrivateExtern, false, false);
CodeGenFunction NewCGF(CGM);
setCGF(&NewCGF);
CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
@@ -824,11 +828,28 @@ void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
IsVolatile, *this, std::array<Address, 1>({{DstPtr}}));
}
+std::string
+CodeGenFunction::getNonTrivialCopyConstructorStr(QualType QT,
+ CharUnits Alignment,
+ bool IsVolatile,
+ ASTContext &Ctx) {
+ GenBinaryFuncName<false> GenName("", Alignment, Alignment, Ctx);
+ return GenName.getName(QT, IsVolatile);
+}
+
+std::string
+CodeGenFunction::getNonTrivialDestructorStr(QualType QT, CharUnits Alignment,
+ bool IsVolatile, ASTContext &Ctx) {
+ GenDestructorFuncName GenName("", Alignment, Ctx);
+ return GenName.getName(QT, IsVolatile);
+}
+
void CodeGenFunction::callCStructDestructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
- GenDestructorFuncName GenName(DstPtr.getAlignment(), getContext());
+ GenDestructorFuncName GenName("__destructor_", DstPtr.getAlignment(),
+ getContext());
std::string FuncName = GenName.getName(QT, IsVolatile);
callSpecialFunction(GenDestructor(getContext()), FuncName, QT, IsVolatile,
*this, std::array<Address, 1>({{DstPtr}}));
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index b94bbf2a384f..9c66ff0e8fb2 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -127,7 +127,7 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
llvm::Constant *Constant =
CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
- llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getLocStart());
+ llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
cast<llvm::LoadInst>(Ptr)->setMetadata(
CGM.getModule().getMDKindID("invariant.load"),
llvm::MDNode::get(getLLVMContext(), None));
@@ -352,6 +352,81 @@ static const Expr *findWeakLValue(const Expr *E) {
return nullptr;
}
+/// The ObjC runtime may provide entrypoints that are likely to be faster
+/// than an ordinary message send of the appropriate selector.
+///
+/// The entrypoints are guaranteed to be equivalent to just sending the
+/// corresponding message. If the entrypoint is implemented naively as just a
+/// message send, using it is a trade-off: it sacrifices a few cycles of
+/// overhead to save a small amount of code. However, it's possible for
+/// runtimes to detect and special-case classes that use "standard"
+/// behavior; if that's dynamically a large proportion of all objects, using
+/// the entrypoint will also be faster than using a message send.
+///
+/// If the runtime does support a required entrypoint, then this method will
+/// generate a call and return the resulting value. Otherwise it will return
+/// None and the caller can generate a msgSend instead.
+static Optional<llvm::Value *>
+tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
+ llvm::Value *Receiver,
+ const CallArgList& Args, Selector Sel,
+ const ObjCMethodDecl *method,
+ bool isClassMessage) {
+ auto &CGM = CGF.CGM;
+ if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
+ return None;
+
+ auto &Runtime = CGM.getLangOpts().ObjCRuntime;
+ switch (Sel.getMethodFamily()) {
+ case OMF_alloc:
+ if (isClassMessage &&
+ Runtime.shouldUseRuntimeFunctionsForAlloc() &&
+ ResultType->isObjCObjectPointerType()) {
+ // [Foo alloc] -> objc_alloc(Foo)
+ if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
+ return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
+ // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
+ if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
+ Args.size() == 1 && Args.front().getType()->isPointerType() &&
+ Sel.getNameForSlot(0) == "allocWithZone") {
+ const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
+ if (isa<llvm::ConstantPointerNull>(arg))
+ return CGF.EmitObjCAllocWithZone(Receiver,
+ CGF.ConvertType(ResultType));
+ return None;
+ }
+ }
+ break;
+
+ case OMF_autorelease:
+ if (ResultType->isObjCObjectPointerType() &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ Runtime.shouldUseARCFunctionsForRetainRelease())
+ return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
+ break;
+
+ case OMF_retain:
+ if (ResultType->isObjCObjectPointerType() &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ Runtime.shouldUseARCFunctionsForRetainRelease())
+ return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
+ break;
+
+ case OMF_release:
+ if (ResultType->isVoidType() &&
+ CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ Runtime.shouldUseARCFunctionsForRetainRelease()) {
+ CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
+ return nullptr;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return None;
+}
+
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -474,10 +549,17 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Args,
method);
} else {
- result = Runtime.GenerateMessageSend(*this, Return, ResultType,
- E->getSelector(),
- Receiver, Args, OID,
- method);
+ // Call runtime methods directly if we can.
+ if (Optional<llvm::Value *> SpecializedResult =
+ tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
+ E->getSelector(), method,
+ isClassMessage)) {
+ result = RValue::get(SpecializedResult.getValue());
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(), Receiver, Args,
+ OID, method);
+ }
}
// For delegate init calls in ARC, implicitly store the result of
@@ -531,7 +613,7 @@ struct FinishARCDealloc final : EHScopeStack::Cleanup {
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
- SourceLocation StartLoc = OMD->getLocStart();
+ SourceLocation StartLoc = OMD->getBeginLoc();
FunctionArgList args;
// Check if we should generate debug info for this method.
if (OMD->hasAttr<NoDebugAttr>())
@@ -548,7 +630,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
args.append(OMD->param_begin(), OMD->param_end());
CurGD = OMD;
- CurEHLocation = OMD->getLocEnd();
+ CurEHLocation = OMD->getEndLoc();
StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
OMD->getLocation(), StartLoc);
@@ -568,7 +650,7 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue, QualType type);
/// Generate an Objective-C method. An Objective-C method is a C function with
-/// its pointer, name, and types registered in the class struture.
+/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
StartObjCMethod(OMD, OMD->getClassInterface());
PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
@@ -883,9 +965,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
if (!AtomicHelperFn) {
- ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
- /*nrvo*/ nullptr);
- EmitReturnStmt(ret);
+ auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
+ propImpl->getGetterCXXConstructor(),
+ /* NRVOCandidate=*/nullptr);
+ EmitReturnStmt(*ret);
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
@@ -1068,8 +1151,9 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
- DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
- VK_LValue, SourceLocation());
+ DeclRefExpr argRef(CGF.getContext(), argVar, false,
+ argVar->getType().getNonReferenceType(), VK_LValue,
+ SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1113,8 +1197,9 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
- DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
- VK_LValue, SourceLocation());
+ DeclRefExpr argRef(CGF.getContext(), argVar, false,
+ argVar->getType().getNonReferenceType(), VK_LValue,
+ SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
@@ -1286,7 +1371,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Otherwise, fake up some ASTs and emit a normal assignment.
ValueDecl *selfDecl = setterMethod->getSelfDecl();
- DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
VK_LValue, SourceLocation());
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
selfDecl->getType(), CK_LValueToRValue, &self,
@@ -1297,7 +1382,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
ParmVarDecl *argDecl = *setterMethod->param_begin();
QualType argType = argDecl->getType().getNonReferenceType();
- DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
+ DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
+ SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
&arg, VK_RValue);
@@ -1459,7 +1545,8 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
llvm::Value *CodeGenFunction::LoadObjCSelf() {
VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
- DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
+ DeclRefExpr DRE(getContext(), Self,
+ /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
Self->getType(), VK_LValue, SourceLocation());
return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}
@@ -1645,9 +1732,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Initialize the variable, in case it's a __block variable or something.
EmitAutoVarInit(variable);
- const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
- DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
- VK_LValue, SourceLocation());
+ const VarDecl *D = cast<VarDecl>(SD->getSingleDecl());
+ DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
+ D->getType(), VK_LValue, SourceLocation());
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
elementIsVariable = true;
@@ -1805,23 +1892,16 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use;
- if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(CGM.VoidTy, None, true);
- fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
- }
+ if (!fn)
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);
// This isn't really a "runtime" function, but as an intrinsic it
// doesn't really matter as long as we align things up.
EmitNounwindRuntimeCall(fn, values);
}
-
-static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
- llvm::FunctionType *FTy,
- StringRef Name) {
- llvm::Constant *RTF = CGM.CreateRuntimeFunction(FTy, Name);
-
+static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
+ llvm::Constant *RTF) {
if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
@@ -1829,14 +1909,8 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
!CGM.getTriple().isOSBinFormatCOFF()) {
F->setLinkage(llvm::Function::ExternalWeakLinkage);
- } else if (Name == "objc_retain" || Name == "objc_release") {
- // If we have Native ARC, set nonlazybind attribute for these APIs for
- // performance.
- F->addFnAttr(llvm::Attribute::NonLazyBind);
}
}
-
- return RTF;
}
/// Perform an operation having the signature
@@ -1844,20 +1918,20 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
+ llvm::Type *returnType,
llvm::Constant *&fn,
- StringRef fnName,
+ llvm::Intrinsic::ID IntID,
bool isTailCall = false) {
if (isa<llvm::ConstantPointerNull>(value))
return value;
if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
- fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ fn = CGF.CGM.getIntrinsic(IntID);
+ setARCRuntimeFunctionLinkage(CGF.CGM, fn);
}
// Cast the argument to 'id'.
- llvm::Type *origType = value->getType();
+ llvm::Type *origType = returnType ? returnType : value->getType();
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
@@ -1874,11 +1948,10 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
Address addr,
llvm::Constant *&fn,
- StringRef fnName) {
+ llvm::Intrinsic::ID IntID) {
if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false);
- fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ fn = CGF.CGM.getIntrinsic(IntID);
+ setARCRuntimeFunctionLinkage(CGF.CGM, fn);
}
// Cast the argument to 'id*'.
@@ -1901,16 +1974,13 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
Address addr,
llvm::Value *value,
llvm::Constant *&fn,
- StringRef fnName,
+ llvm::Intrinsic::ID IntID,
bool ignored) {
assert(addr.getElementType() == value->getType());
if (!fn) {
- llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
-
- llvm::FunctionType *fnType
- = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
- fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ fn = CGF.CGM.getIntrinsic(IntID);
+ setARCRuntimeFunctionLinkage(CGF.CGM, fn);
}
llvm::Type *origType = value->getType();
@@ -1932,15 +2002,12 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
Address dst,
Address src,
llvm::Constant *&fn,
- StringRef fnName) {
+ llvm::Intrinsic::ID IntID) {
assert(dst.getType() == src.getType());
if (!fn) {
- llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
-
- llvm::FunctionType *fnType
- = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
- fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ fn = CGF.CGM.getIntrinsic(IntID);
+ setARCRuntimeFunctionLinkage(CGF.CGM, fn);
}
llvm::Value *args[] = {
@@ -1950,6 +2017,39 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(fn, args);
}
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Type *returnType,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value))
+ return value;
+
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
+ fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);
+
+ // We have Native ARC, so set nonlazybind attribute for performance
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ if (fnName == "objc_retain")
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
+
+ // Cast the argument to 'id'.
+ llvm::Type *origType = returnType ? returnType : value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
+
/// Produce the code to do a retain. Based on the type, calls one of:
/// call i8* \@objc_retain(i8* %value)
/// call i8* \@objc_retainBlock(i8* %value)
@@ -1963,9 +2063,9 @@ llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retain,
- "objc_retain");
+ llvm::Intrinsic::objc_retain);
}
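(For reference, and not shown in this diff: llvm::Intrinsic::objc_retain and the other IDs used above name the llvm.objc.* intrinsics, so the frontend now emits, e.g., a call to llvm.objc.retain and relies on a later LLVM lowering pass to turn that back into the objc_retain runtime call that the doc comments still describe.)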
/// Retain the given block, with _Block_copy semantics.
@@ -1977,9 +2077,9 @@ llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
bool mandatory) {
llvm::Value *result
- = emitARCValueOperation(*this, value,
+ = emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainBlock,
- "objc_retainBlock");
+ llvm::Intrinsic::objc_retainBlock);
// If the copy isn't mandatory, add !clang.arc.copy_on_escape to
// tell the optimizer that it doesn't need to do this copy if the
@@ -2047,9 +2147,9 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
- "objc_retainAutoreleasedReturnValue");
+ llvm::Intrinsic::objc_retainAutoreleasedReturnValue);
}
/// Claim a possibly-autoreleased return value at +0. This is only
@@ -2062,9 +2162,9 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
emitAutoreleasedReturnValueMarker(*this);
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
- "objc_unsafeClaimAutoreleasedReturnValue");
+ llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
}
/// Release the given object.
@@ -2075,9 +2175,8 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
- fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
+ setARCRuntimeFunctionLinkage(CGM, fn);
}
// Cast the argument to 'id'.
@@ -2122,10 +2221,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
if (!fn) {
- llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
- llvm::FunctionType *fnType
- = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
- fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
+ setARCRuntimeFunctionLinkage(CGM, fn);
}
llvm::Value *args[] = {
@@ -2177,18 +2274,18 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
/// Autorelease the given object.
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autorelease,
- "objc_autorelease");
+ llvm::Intrinsic::objc_autorelease);
}
/// Autorelease the given object.
/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
- "objc_autoreleaseReturnValue",
+ llvm::Intrinsic::objc_autoreleaseReturnValue,
/*isTailCall*/ true);
}
@@ -2196,9 +2293,9 @@ CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
- "objc_retainAutoreleaseReturnValue",
+ llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
/*isTailCall*/ true);
}
@@ -2225,9 +2322,9 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
+ return emitARCValueOperation(*this, value, nullptr,
CGM.getObjCEntrypoints().objc_retainAutorelease,
- "objc_retainAutorelease");
+ llvm::Intrinsic::objc_retainAutorelease);
}
/// i8* \@objc_loadWeak(i8** %addr)
@@ -2235,14 +2332,14 @@ CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
return emitARCLoadOperation(*this, addr,
CGM.getObjCEntrypoints().objc_loadWeak,
- "objc_loadWeak");
+ llvm::Intrinsic::objc_loadWeak);
}
/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
return emitARCLoadOperation(*this, addr,
CGM.getObjCEntrypoints().objc_loadWeakRetained,
- "objc_loadWeakRetained");
+ llvm::Intrinsic::objc_loadWeakRetained);
}
/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
@@ -2252,7 +2349,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
bool ignored) {
return emitARCStoreOperation(*this, addr, value,
CGM.getObjCEntrypoints().objc_storeWeak,
- "objc_storeWeak", ignored);
+ llvm::Intrinsic::objc_storeWeak, ignored);
}
/// i8* \@objc_initWeak(i8** %addr, i8* %value)
@@ -2272,7 +2369,7 @@ void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
emitARCStoreOperation(*this, addr, value,
CGM.getObjCEntrypoints().objc_initWeak,
- "objc_initWeak", /*ignored*/ true);
+ llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}
/// void \@objc_destroyWeak(i8** %addr)
@@ -2280,9 +2377,8 @@ void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
- fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
+ setARCRuntimeFunctionLinkage(CGM, fn);
}
// Cast the argument to 'id*'.
@@ -2297,7 +2393,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
CGM.getObjCEntrypoints().objc_moveWeak,
- "objc_moveWeak");
+ llvm::Intrinsic::objc_moveWeak);
}
/// void \@objc_copyWeak(i8** %dest, i8** %src)
@@ -2306,7 +2402,7 @@ void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
emitARCCopyOperation(*this, dst, src,
CGM.getObjCEntrypoints().objc_copyWeak,
- "objc_copyWeak");
+ llvm::Intrinsic::objc_copyWeak);
}
void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
@@ -2329,9 +2425,8 @@ void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(Int8PtrTy, false);
- fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
+ setARCRuntimeFunctionLinkage(CGM, fn);
}
return EmitNounwindRuntimeCall(fn);
@@ -2342,18 +2437,28 @@ llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
assert(value->getType() == Int8PtrTy);
- llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
- if (!fn) {
- llvm::FunctionType *fnType =
- llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
+ if (getInvokeDest()) {
+ // Call the runtime method, not the intrinsic, if we are handling exceptions.
+ llvm::Constant *&fn =
+ CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
+ fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ }
- // We don't want to use a weak import here; instead we should not
- // fall into this path.
- fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
- }
+ // objc_autoreleasePoolPop can throw.
+ EmitRuntimeCallOrInvoke(fn, value);
+ } else {
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ }
- // objc_autoreleasePoolPop can throw.
- EmitRuntimeCallOrInvoke(fn, value);
+ EmitRuntimeCall(fn, value);
+ }
}
/// Produce the code to do an MRR version objc_autoreleasepool_push.
@@ -2384,6 +2489,24 @@ llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
return InitRV.getScalarVal();
}
+/// Allocate the given objc object.
+/// call i8* \@objc_alloc(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitObjCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_alloc,
+ "objc_alloc");
+}
+
+/// Allocate the given objc object.
+/// call i8* \@objc_allocWithZone(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
+ llvm::Type *resultType) {
+ return emitObjCValueOperation(*this, value, resultType,
+ CGM.getObjCEntrypoints().objc_allocWithZone,
+ "objc_allocWithZone");
+}
+
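(The two helpers above only declare and call the runtime entry points by name; the presumable intent, not visible in this hunk, is to let +alloc and +allocWithZone: message sends be lowered to direct objc_alloc / objc_allocWithZone calls on runtimes known to provide them.)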
/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
@@ -2418,6 +2541,55 @@ void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
CGF.EmitARCIntrinsicUse(value);
}
+/// Autorelease the given object.
+/// call i8* \@objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
+ llvm::Type *returnType) {
+ return emitObjCValueOperation(*this, value, returnType,
+ CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
+ "objc_autorelease");
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* \@objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
+ llvm::Type *returnType) {
+ return emitObjCValueOperation(*this, value, returnType,
+ CGM.getObjCEntrypoints().objc_retainRuntimeFunction,
+ "objc_retain");
+}
+
+/// Release the given object.
+/// call void \@objc_release(i8* %value)
+void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
+ ARCPreciseLifetime_t precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
+ fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ // We have Native ARC, so set the nonlazybind attribute for performance.
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
+
+ if (precise == ARCImpreciseLifetime) {
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), None));
+ }
+}
+
namespace {
struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
llvm::Value *Token;
@@ -2446,27 +2618,36 @@ void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}
-static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
- LValue lvalue,
- QualType type) {
- switch (type.getObjCLifetime()) {
+static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
- return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
- SourceLocation()).getScalarVal(),
- false);
+ return true;
case Qualifiers::OCL_Weak:
- return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
- true);
+ return false;
}
llvm_unreachable("impossible lifetime!");
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ llvm::Value *result;
+ bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
+ if (shouldRetain) {
+ result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
+ } else {
+ assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
+ result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
+ }
+ return TryEmitResult(result, !shouldRetain);
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
const Expr *e) {
e = e->IgnoreParens();
QualType type = e->getType();
@@ -2500,6 +2681,16 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
return TryEmitResult(CGF.EmitScalarExpr(e), false);
+ // Try to emit code for scalar constant instead of emitting LValue and
+ // loading it because we are not guaranteed to have an l-value. One of such
+ // cases is DeclRefExpr referencing non-odr-used constant-evaluated variable.
+ if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
+ auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
+ if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
+ return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
+ !shouldRetainObjCLifetime(type.getObjCLifetime()));
+ }
+
return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}
@@ -3229,29 +3420,32 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
ASTContext &C = getContext();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ QualType ReturnTy = C.VoidTy;
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(DestTy);
+ ArgTys.push_back(SrcTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- DestTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
+ ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- SrcTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
+ ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
@@ -3262,25 +3456,25 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr DstExpr(&DstDecl, false, DestTy,
- VK_RValue, SourceLocation());
+ DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
+ SourceLocation());
UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation(), false);
- DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
- VK_RValue, SourceLocation());
+ DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
+ SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation(), false);
Expr *Args[2] = { &DST, &SRC };
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
- CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
- Args, DestTy->getPointeeType(),
- VK_LValue, SourceLocation(), FPOptions());
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation(), FPOptions());
- EmitStmt(&TheCall);
+ EmitStmt(TheCall);
FinishFunction();
HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
@@ -3301,53 +3495,54 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
return nullptr;
llvm::Constant *HelperFn = nullptr;
-
if (hasTrivialGetExpr(PID))
return nullptr;
assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
return HelperFn;
-
ASTContext &C = getContext();
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
- FunctionDecl *FD = FunctionDecl::Create(C,
- C.getTranslationUnitDecl(),
- SourceLocation(),
- SourceLocation(), II, C.VoidTy,
- nullptr, SC_Static,
- false,
- false);
+ IdentifierInfo *II =
+ &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ QualType ReturnTy = C.VoidTy;
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
+ SmallVector<QualType, 2> ArgTys;
+ ArgTys.push_back(DestTy);
+ ArgTys.push_back(SrcTy);
+ QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
+
+ FunctionDecl *FD = FunctionDecl::Create(
+ C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
+ FunctionTy, nullptr, SC_Static, false, false);
+
FunctionArgList args;
- ImplicitParamDecl DstDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- DestTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
+ ImplicitParamDecl::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(getContext(), FD, SourceLocation(), /*Id=*/nullptr,
- SrcTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
+ ImplicitParamDecl::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);
+ CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- "__copy_helper_atomic_property_", &CGM.getModule());
+ llvm::Function *Fn = llvm::Function::Create(
+ LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
+ &CGM.getModule());
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- StartFunction(FD, C.VoidTy, Fn, FI, args);
+ StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
- VK_RValue, SourceLocation());
+ DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
+ SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation(), false);
@@ -3372,8 +3567,8 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CXXConstExpr->getConstructionKind(),
SourceRange());
- DeclRefExpr DstExpr(&DstDecl, false, DestTy,
- VK_RValue, SourceLocation());
+ DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
+ SourceLocation());
RValue DV = EmitAnyExpr(&DstExpr);
CharUnits Alignment
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 3e994edc976b..548bd6b3fd72 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -18,6 +18,7 @@
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGCXXABI.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -178,6 +179,9 @@ protected:
/// runtime provides some LLVM passes that can use this to do things like
/// automatic IMP caching and speculative inlining.
unsigned msgSendMDKind;
+ /// Does the current target use SEH-based exceptions? False implies
+ /// Itanium-style DWARF unwinding.
+ bool usesSEHExceptions;
/// Helper to check if we are targeting a specific runtime version or later.
bool isRuntime(ObjCRuntime::Kind kind, unsigned major, unsigned minor=0) {
@@ -217,6 +221,7 @@ protected:
llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
auto *GV = new llvm::GlobalVariable(TheModule, value->getType(), true,
llvm::GlobalValue::LinkOnceODRLinkage, value, name);
+ GV->setComdat(TheModule.getOrInsertComdat(name));
if (Private)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
ConstStr = GV;
@@ -272,6 +277,8 @@ protected:
Fields.addInt(Int8Ty, 0);
}
+ virtual llvm::Constant *GenerateCategoryProtocolList(const
+ ObjCCategoryDecl *OCD);
virtual ConstantArrayBuilder PushPropertyListHeader(ConstantStructBuilder &Fields,
int count) {
// int count;
@@ -510,8 +517,8 @@ protected:
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
- virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding);
+ virtual llvm::Value *GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding);
/// Returns the name of ivar offset variables. In the GNUstep v1 ABI, this
/// contains the class and ivar names, in the v2 ABI this contains the type
@@ -810,8 +817,12 @@ class CGObjCGNUstep : public CGObjCGNU {
// Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
- // If we're in ObjC++ mode, then we want to make
- if (CGM.getLangOpts().CPlusPlus) {
+ // Declare the exception-handling helpers: with SEH we declare the runtime's
+ // rethrow entry point; in ObjC++ mode we interoperate with the C++ EH
+ // runtime (__cxa_begin_catch and friends).
+ if (usesSEHExceptions) {
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // void objc_exception_rethrow(void)
+ ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
+ } else if (CGM.getLangOpts().CPlusPlus) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
@@ -888,22 +899,25 @@ class CGObjCGNUstep : public CGObjCGNU {
/// This is the ABI that provides a clean break with the legacy GCC ABI and
/// cleans up a number of things that were added to work around 1980s linkers.
class CGObjCGNUstep2 : public CGObjCGNUstep {
- /// The section for selectors.
- static constexpr const char *const SelSection = "__objc_selectors";
- /// The section for classes.
- static constexpr const char *const ClsSection = "__objc_classes";
- /// The section for references to classes.
- static constexpr const char *const ClsRefSection = "__objc_class_refs";
- /// The section for categories.
- static constexpr const char *const CatSection = "__objc_cats";
- /// The section for protocols.
- static constexpr const char *const ProtocolSection = "__objc_protocols";
- /// The section for protocol references.
- static constexpr const char *const ProtocolRefSection = "__objc_protocol_refs";
- /// The section for class aliases
- static constexpr const char *const ClassAliasSection = "__objc_class_aliases";
- /// The section for constexpr constant strings
- static constexpr const char *const ConstantStringSection = "__objc_constant_string";
+ enum SectionKind
+ {
+ SelectorSection = 0,
+ ClassSection,
+ ClassReferenceSection,
+ CategorySection,
+ ProtocolSection,
+ ProtocolReferenceSection,
+ ClassAliasSection,
+ ConstantStringSection
+ };
+ static const char *const SectionsBaseNames[8];
+ template<SectionKind K>
+ std::string sectionName() {
+ std::string name(SectionsBaseNames[K]);
+ if (CGM.getTriple().isOSBinFormatCOFF())
+ name += "$m";
+ return name;
+ }
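(Concretely, with the base names defined below: on ELF targets sectionName<SelectorSection>() returns just "__objc_selectors", while on COFF it returns "__objc_selectors$m", so the metadata lands in the $m subsection, between the $a and $z sentinels that GetSectionBounds creates further down.)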
/// The GCC ABI superclass message lookup function. Takes a pointer to a
/// structure describing the receiver and the class, and a selector as
/// arguments. Returns the IMP for the corresponding method.
@@ -1069,7 +1083,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
isNamed ? StringRef(StringName) : ".objc_string",
Align, false, isNamed ? llvm::GlobalValue::LinkOnceODRLinkage
: llvm::GlobalValue::PrivateLinkage);
- ObjCStrGV->setSection(ConstantStringSection);
+ ObjCStrGV->setSection(sectionName<ConstantStringSection>());
if (isNamed) {
ObjCStrGV->setComdat(TheModule.getOrInsertComdat(StringName));
ObjCStrGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -1152,6 +1166,15 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
return MethodList.finishAndCreateGlobal(".objc_protocol_method_list",
CGM.getPointerAlign());
}
+ llvm::Constant *GenerateCategoryProtocolList(const ObjCCategoryDecl *OCD)
+ override {
+ SmallVector<llvm::Constant*, 16> Protocols;
+ for (const auto *PI : OCD->getReferencedProtocols())
+ Protocols.push_back(
+ llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
+ ProtocolPtrTy));
+ return GenerateProtocolList(Protocols);
+ }
llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
@@ -1247,9 +1270,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
assert(!TheModule.getGlobalVariable(RefName));
// Emit a reference symbol.
auto GV = new llvm::GlobalVariable(TheModule, ProtocolPtrTy,
- false, llvm::GlobalValue::ExternalLinkage,
+ false, llvm::GlobalValue::LinkOnceODRLinkage,
llvm::ConstantExpr::getBitCast(Protocol, ProtocolPtrTy), RefName);
- GV->setSection(ProtocolRefSection);
+ GV->setComdat(TheModule.getOrInsertComdat(RefName));
+ GV->setSection(sectionName<ProtocolReferenceSection>());
GV->setAlignment(CGM.getPointerAlign().getQuantity());
Ref = GV;
}
@@ -1282,9 +1306,22 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
EmittedProtocol = true;
+ auto SymName = SymbolForProtocol(ProtocolName);
+ auto *OldGV = TheModule.getGlobalVariable(SymName);
+
// Use the protocol definition, if there is one.
if (const ObjCProtocolDecl *Def = PD->getDefinition())
PD = Def;
+ else {
+ // If there is no definition, then create an external linkage symbol and
+ // hope that someone else fills it in for us (and fail to link if they
+ // don't).
+ assert(!OldGV);
+ Protocol = new llvm::GlobalVariable(TheModule, ProtocolTy,
+ /*isConstant*/false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, SymName);
+ return Protocol;
+ }
SmallVector<llvm::Constant*, 16> Protocols;
for (const auto *PI : PD->protocols())
@@ -1301,8 +1338,6 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
EmitProtocolMethodList(PD->class_methods(), ClassMethodList,
OptionalClassMethodList);
- auto SymName = SymbolForProtocol(ProtocolName);
- auto *OldGV = TheModule.getGlobalVariable(SymName);
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
ConstantInitBuilder builder(CGM);
@@ -1326,7 +1361,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *GV = ProtocolBuilder.finishAndCreateGlobal(SymName,
CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
- GV->setSection(ProtocolSection);
+ GV->setSection(sectionName<ProtocolSection>());
GV->setComdat(TheModule.getOrInsertComdat(SymName));
if (OldGV) {
OldGV->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GV,
@@ -1342,8 +1377,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
return Val;
return llvm::ConstantExpr::getBitCast(Val, Ty);
}
- llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding) override {
+ llvm::Value *GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding) override {
return GetConstantSelector(Sel, TypeEncoding);
}
llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
@@ -1359,6 +1394,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
TypeEncoding);
auto *GV = new llvm::GlobalVariable(TheModule, Init->getType(),
true, llvm::GlobalValue::LinkOnceODRLinkage, Init, TypesVarName);
+ GV->setComdat(TheModule.getOrInsertComdat(TypesVarName));
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
TypesGlobal = GV;
}
@@ -1387,12 +1423,41 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
GV->setComdat(TheModule.getOrInsertComdat(SelVarName));
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- GV->setSection(SelSection);
+ GV->setSection(sectionName<SelectorSection>());
auto *SelVal = EnforceType(GV, SelectorTy);
return SelVal;
}
+ llvm::StructType *emptyStruct = nullptr;
+
+ /// Return pointers to the start and end of a section. On ELF platforms, we
+ /// use the __start_ and __stop_ symbols that GNU-compatible linkers define at
+ /// the start and end of a section, as long as the section name is a valid
+ /// identifier and the symbols are referenced but not defined. On Windows, we
+ /// use the fact that MSVC-compatible linkers will lexically sort by
+ /// subsections and place everything that we want to reference in a middle
+ /// subsection and then insert zero-sized symbols in subsections a and z.
std::pair<llvm::Constant*,llvm::Constant*>
GetSectionBounds(StringRef Section) {
+ if (CGM.getTriple().isOSBinFormatCOFF()) {
+ if (emptyStruct == nullptr) {
+ emptyStruct = llvm::StructType::create(VMContext, ".objc_section_sentinel");
+ emptyStruct->setBody({}, /*isPacked*/true);
+ }
+ auto ZeroInit = llvm::Constant::getNullValue(emptyStruct);
+ auto Sym = [&](StringRef Prefix, StringRef SecSuffix) {
+ auto *Sym = new llvm::GlobalVariable(TheModule, emptyStruct,
+ /*isConstant*/false,
+ llvm::GlobalValue::LinkOnceODRLinkage, ZeroInit, Prefix +
+ Section);
+ Sym->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Sym->setSection((Section + SecSuffix).str());
+ Sym->setComdat(TheModule.getOrInsertComdat((Prefix +
+ Section).str()));
+ Sym->setAlignment(1);
+ return Sym;
+ };
+ return { Sym("__start_", "$a"), Sym("__stop", "$z") };
+ }
auto *Start = new llvm::GlobalVariable(TheModule, PtrTy,
/*isConstant*/false,
llvm::GlobalValue::ExternalLinkage, nullptr, StringRef("__start_") +
@@ -1405,6 +1470,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Stop->setVisibility(llvm::GlobalValue::HiddenVisibility);
return { Start, Stop };
}
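A minimal consumer-side sketch of the ELF trick described above (editor's illustration, not part of the patch; the section name is the __objc_selectors base name used elsewhere in this class, and the element type is simplified):

    // A GNU-compatible linker defines __start_<sec> and __stop_<sec> for any
    // section whose name is a valid C identifier, provided the symbols are
    // referenced but not defined.
    extern "C" void *__start___objc_selectors[];
    extern "C" void *__stop___objc_selectors[];

    static void walkSelectorSection() {
      for (void **entry = __start___objc_selectors;
           entry != __stop___objc_selectors; ++entry) {
        // Each slot here stands in for one record emitted into __objc_selectors.
      }
    }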
+ CatchTypeInfo getCatchAllTypeInfo() override {
+ return CGM.getCXXABI().getCatchAllTypeInfo();
+ }
llvm::Function *ModuleInitFunction() override {
llvm::Function *LoadFunction = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
@@ -1420,19 +1488,11 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantInitBuilder builder(CGM);
auto InitStructBuilder = builder.beginStruct();
InitStructBuilder.addInt(Int64Ty, 0);
- auto addSection = [&](const char *section) {
- auto bounds = GetSectionBounds(section);
+ for (auto *s : SectionsBaseNames) {
+ auto bounds = GetSectionBounds(s);
InitStructBuilder.add(bounds.first);
InitStructBuilder.add(bounds.second);
};
- addSection(SelSection);
- addSection(ClsSection);
- addSection(ClsRefSection);
- addSection(CatSection);
- addSection(ProtocolSection);
- addSection(ProtocolRefSection);
- addSection(ClassAliasSection);
- addSection(ConstantStringSection);
auto *InitStruct = InitStructBuilder.finishAndCreateGlobal(".objc_init",
CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
InitStruct->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -1451,18 +1511,23 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Check that this hasn't been renamed. This shouldn't happen, because
// this function should be called precisely once.
assert(InitVar->getName() == ".objc_ctor");
- InitVar->setSection(".ctors");
+ // On Windows, initialisers are sorted by their suffix. XCL is for library
+ // initialisers, which run before user initialisers. We run the Objective-C
+ // loads at the end of library load. This means +load methods run before any
+ // other static constructors, while static constructors can still see a
+ // fully initialised Objective-C state.
+ if (CGM.getTriple().isOSBinFormatCOFF())
+ InitVar->setSection(".CRT$XCLz");
+ else
+ InitVar->setSection(".ctors");
InitVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
InitVar->setComdat(TheModule.getOrInsertComdat(".objc_ctor"));
- CGM.addCompilerUsedGlobal(InitVar);
+ CGM.addUsedGlobal(InitVar);
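A rough sketch of the CRT ordering the comment above relies on (editor's illustration, MSVC-specific, all names hypothetical): the startup code walks the function pointers placed between .CRT$XCA and .CRT$XCZ in lexical subsection order, so a pointer planted in .CRT$XCLz runs after other library initialisers but before the user-level constructors in .CRT$XCU.

    typedef void(__cdecl *CrtInitFn)(void);
    static void __cdecl runObjCLoadsEarly(void) { /* hypothetical +load driver */ }

    #pragma section(".CRT$XCLz", read)
    __declspec(allocate(".CRT$XCLz")) static const CrtInitFn objcCtorSlot =
        runObjCLoadsEarly;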
for (auto *C : Categories) {
auto *Cat = cast<llvm::GlobalVariable>(C->stripPointerCasts());
- Cat->setSection(CatSection);
+ Cat->setSection(sectionName<CategorySection>());
CGM.addUsedGlobal(Cat);
}
- // Add a null value for each special section so that we can always
- // guarantee that the _start and _stop symbols will exist and be
- // meaningful.
auto createNullGlobal = [&](StringRef Name, ArrayRef<llvm::Constant*> Init,
StringRef Section) {
auto nullBuilder = builder.beginStruct();
@@ -1476,38 +1541,48 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGM.addUsedGlobal(GV);
return GV;
};
- createNullGlobal(".objc_null_selector", {NULLPtr, NULLPtr}, SelSection);
- if (Categories.empty())
- createNullGlobal(".objc_null_category", {NULLPtr, NULLPtr,
- NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr}, CatSection);
- if (!EmittedClass) {
- createNullGlobal(".objc_null_cls_init_ref", NULLPtr, ClsSection);
- createNullGlobal(".objc_null_class_ref", { NULLPtr, NULLPtr },
- ClsRefSection);
- }
- if (!EmittedProtocol)
- createNullGlobal(".objc_null_protocol", {NULLPtr, NULLPtr, NULLPtr,
- NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr,
- NULLPtr}, ProtocolSection);
- if (!EmittedProtocolRef)
- createNullGlobal(".objc_null_protocol_ref", {NULLPtr}, ProtocolRefSection);
- if (!ClassAliases.empty())
- for (auto clsAlias : ClassAliases)
- createNullGlobal(std::string(".objc_class_alias") +
- clsAlias.second, { MakeConstantString(clsAlias.second),
- GetClassVar(clsAlias.first) }, ClassAliasSection);
- else
- createNullGlobal(".objc_null_class_alias", { NULLPtr, NULLPtr },
- ClassAliasSection);
- if (ConstantStrings.empty()) {
- auto i32Zero = llvm::ConstantInt::get(Int32Ty, 0);
- createNullGlobal(".objc_null_constant_string", { NULLPtr, i32Zero,
- i32Zero, i32Zero, i32Zero, NULLPtr }, ConstantStringSection);
+ for (auto clsAlias : ClassAliases)
+ createNullGlobal(std::string(".objc_class_alias") +
+ clsAlias.second, { MakeConstantString(clsAlias.second),
+ GetClassVar(clsAlias.first) }, sectionName<ClassAliasSection>());
+ // On ELF platforms, add a null value for each special section so that we
+ // can always guarantee that the _start and _stop symbols will exist and be
+ // meaningful. This is not required on COFF platforms, where our start and
+ // stop symbols will create the section.
+ if (!CGM.getTriple().isOSBinFormatCOFF()) {
+ createNullGlobal(".objc_null_selector", {NULLPtr, NULLPtr},
+ sectionName<SelectorSection>());
+ if (Categories.empty())
+ createNullGlobal(".objc_null_category", {NULLPtr, NULLPtr,
+ NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr},
+ sectionName<CategorySection>());
+ if (!EmittedClass) {
+ createNullGlobal(".objc_null_cls_init_ref", NULLPtr,
+ sectionName<ClassSection>());
+ createNullGlobal(".objc_null_class_ref", { NULLPtr, NULLPtr },
+ sectionName<ClassReferenceSection>());
+ }
+ if (!EmittedProtocol)
+ createNullGlobal(".objc_null_protocol", {NULLPtr, NULLPtr, NULLPtr,
+ NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr,
+ NULLPtr}, sectionName<ProtocolSection>());
+ if (!EmittedProtocolRef)
+ createNullGlobal(".objc_null_protocol_ref", {NULLPtr},
+ sectionName<ProtocolReferenceSection>());
+ if (ClassAliases.empty())
+ createNullGlobal(".objc_null_class_alias", { NULLPtr, NULLPtr },
+ sectionName<ClassAliasSection>());
+ if (ConstantStrings.empty()) {
+ auto i32Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ createNullGlobal(".objc_null_constant_string", { NULLPtr, i32Zero,
+ i32Zero, i32Zero, i32Zero, NULLPtr },
+ sectionName<ConstantStringSection>());
+ }
}
ConstantStrings.clear();
Categories.clear();
Classes.clear();
- return nullptr;//CGObjCGNU::ModuleInitFunction();
+ return nullptr;
}
/// In the v2 ABI, ivar offset variables use the type encoding in their name
/// to trigger linker failures if the types don't match.
@@ -1774,7 +1849,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
auto *classRefSymbol = GetClassVar(className);
- classRefSymbol->setSection(ClsRefSection);
+ classRefSymbol->setSection(sectionName<ClassReferenceSection>());
classRefSymbol->setInitializer(llvm::ConstantExpr::getBitCast(classStruct, IdTy));
@@ -1805,7 +1880,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto classInitRef = new llvm::GlobalVariable(TheModule,
classStruct->getType(), false, llvm::GlobalValue::ExternalLinkage,
classStruct, "._OBJC_INIT_CLASS_" + className);
- classInitRef->setSection(ClsSection);
+ classInitRef->setSection(sectionName<ClassSection>());
CGM.addUsedGlobal(classInitRef);
EmittedClass = true;
@@ -1829,6 +1904,18 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
};
+const char *const CGObjCGNUstep2::SectionsBaseNames[8] =
+{
+"__objc_selectors",
+"__objc_classes",
+"__objc_class_refs",
+"__objc_cats",
+"__objc_protocols",
+"__objc_protocol_refs",
+"__objc_class_aliases",
+"__objc_constant_string"
+};
+
/// Support for the ObjFW runtime.
class CGObjCObjFW: public CGObjCGNU {
protected:
@@ -1931,6 +2018,8 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
ProtocolVersion(protocolClassVersion), ClassABIVersion(classABI) {
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+ usesSEHExceptions =
+ cgm.getContext().getTargetInfo().getTriple().isWindowsMSVCEnvironment();
CodeGenTypes &Types = CGM.getTypes();
IntTy = cast<llvm::IntegerType>(
@@ -2121,8 +2210,8 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
return Value;
}
-llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
- const std::string &TypeEncoding) {
+llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
+ const std::string &TypeEncoding) {
SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = nullptr;
@@ -2155,13 +2244,13 @@ Address CGObjCGNU::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
- return GetSelector(CGF, Sel, std::string());
+ return GetTypedSelector(CGF, Sel, std::string());
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
std::string SelTypes = CGM.getContext().getObjCEncodingForMethodDecl(Method);
- return GetSelector(CGF, Method->getSelector(), SelTypes);
+ return GetTypedSelector(CGF, Method->getSelector(), SelTypes);
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
@@ -2186,6 +2275,9 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
}
llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
+ if (usesSEHExceptions)
+ return CGM.getCXXABI().getAddrOfRTTIDescriptor(T);
+
if (!CGM.getLangOpts().CPlusPlus)
return CGObjCGNU::GetEHType(T);
@@ -3018,18 +3110,21 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
return ptr;
}
+llvm::Constant *CGObjCGNU::GenerateCategoryProtocolList(const
+ ObjCCategoryDecl *OCD) {
+ SmallVector<std::string, 16> Protocols;
+ for (const auto *PD : OCD->getReferencedProtocols())
+ Protocols.push_back(PD->getNameAsString());
+ return GenerateProtocolList(Protocols);
+}
+
void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
const ObjCInterfaceDecl *Class = OCD->getClassInterface();
std::string ClassName = Class->getNameAsString();
std::string CategoryName = OCD->getNameAsString();
// Collect the names of referenced protocols
- SmallVector<std::string, 16> Protocols;
const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
- const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
- for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
- E = Protos.end(); I != E; ++I)
- Protocols.push_back((*I)->getNameAsString());
ConstantInitBuilder Builder(CGM);
auto Elements = Builder.beginStruct();
@@ -3051,7 +3146,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
GenerateMethodList(ClassName, CategoryName, ClassMethods, true),
PtrTy);
// Protocol list
- Elements.addBitCast(GenerateProtocolList(Protocols), PtrTy);
+ Elements.addBitCast(GenerateCategoryProtocolList(CatDecl), PtrTy);
if (isRuntime(ObjCRuntime::GNUstep, 2)) {
const ObjCCategoryDecl *Category =
Class->FindCategoryDeclaration(OCD->getIdentifier());
@@ -3460,12 +3555,16 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
ConstantInitBuilder builder(CGM);
auto selectors = builder.beginArray(selStructTy);
auto &table = SelectorTable; // MSVC workaround
- for (auto &entry : table) {
+ std::vector<Selector> allSelectors;
+ for (auto &entry : table)
+ allSelectors.push_back(entry.first);
+ llvm::sort(allSelectors);
- std::string selNameStr = entry.first.getAsString();
+ for (auto &untypedSel : allSelectors) {
+ std::string selNameStr = untypedSel.getAsString();
llvm::Constant *selName = ExportUniqueString(selNameStr, ".objc_sel_name");
- for (TypedSelector &sel : entry.second) {
+ for (TypedSelector &sel : table[untypedSel]) {
llvm::Constant *selectorTypeEncoding = NULLPtr;
if (!sel.first.empty())
selectorTypeEncoding =
@@ -3726,6 +3825,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint) {
llvm::Value *ExceptionAsObject;
+ bool isRethrow = false;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
@@ -3734,11 +3834,24 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ isRethrow = true;
+ }
+ if (isRethrow && usesSEHExceptions) {
+ // For SEH, ExceptionAsObject may be undef, because the catch handler is
+ // not passed it for catchalls and so it is not visible to the catch
+ // funclet. The real thrown object will still be live on the stack at this
+ // point and will be rethrown. If we are explicitly rethrowing the object
+ // that was passed into the `@catch` block, then this code path is not
+ // reached and we will instead call `objc_exception_throw` with an explicit
+ // argument.
+ CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn).setDoesNotReturn();
+ }
+ else {
+ ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
+ llvm::CallSite Throw =
+ CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
+ Throw.setDoesNotReturn();
}
- ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
- llvm::CallSite Throw =
- CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
- Throw.setDoesNotReturn();
CGF.Builder.CreateUnreachable();
if (ClearInsertionPoint)
CGF.Builder.ClearInsertionPoint();
@@ -3812,40 +3925,10 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// is. This allows code compiled with non-fragile ivars to work correctly
// when linked against code which isn't (most of the time).
llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
- if (!IvarOffsetPointer) {
- // This will cause a run-time crash if we accidentally use it. A value of
- // 0 would seem more sensible, but will silently overwrite the isa pointer
- // causing a great deal of confusion.
- uint64_t Offset = -1;
- // We can't call ComputeIvarBaseOffset() here if we have the
- // implementation, because it will create an invalid ASTRecordLayout object
- // that we are then stuck with forever, so we only initialize the ivar
- // offset variable with a guess if we only have the interface. The
- // initializer will be reset later anyway, when we are generating the class
- // description.
- if (!CGM.getContext().getObjCImplementation(
- const_cast<ObjCInterfaceDecl *>(ID)))
- Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
-
- llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset,
- /*isSigned*/true);
- // Don't emit the guess in non-PIC code because the linker will not be able
- // to replace it with the real version for a library. In non-PIC code you
- // must compile with the fragile ABI if you want to use ivars from a
- // GCC-compiled class.
- if (CGM.getLangOpts().PICLevel) {
- llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
- Int32Ty, false,
- llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
- IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
- IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
- IvarOffsetGV, Name);
- } else {
- IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
- llvm::Type::getInt32PtrTy(VMContext), false,
- llvm::GlobalValue::ExternalLinkage, nullptr, Name);
- }
- }
+ if (!IvarOffsetPointer)
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name);
return IvarOffsetPointer;
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 2b54e7bd67af..d91eb43ca322 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -23,9 +23,9 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
@@ -37,6 +37,7 @@
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -1085,9 +1086,14 @@ public:
const CGBlockInfo &blockInfo) override;
llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
const CGBlockInfo &blockInfo) override;
+ std::string getRCBlockLayoutStr(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) override;
llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
QualType T) override;
+
+private:
+ void fillRunSkipBlockVars(CodeGenModule &CGM, const CGBlockInfo &blockInfo);
};
namespace {
@@ -2795,8 +2801,44 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
return getConstantGEP(VMContext, Entry, 0, 0);
}
-llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
- const CGBlockInfo &blockInfo) {
+static std::string getBlockLayoutInfoString(
+ const SmallVectorImpl<CGObjCCommonMac::RUN_SKIP> &RunSkipBlockVars,
+ bool HasCopyDisposeHelpers) {
+ std::string Str;
+ for (const CGObjCCommonMac::RUN_SKIP &R : RunSkipBlockVars) {
+ if (R.opcode == CGObjCCommonMac::BLOCK_LAYOUT_UNRETAINED) {
+ // Copy/dispose helpers don't have any information about
+ // __unsafe_unretained captures, so unconditionally concatenate a string.
+ Str += "u";
+ } else if (HasCopyDisposeHelpers) {
+ // Information about __strong, __weak, or byref captures has already been
+ // encoded into the names of the copy/dispose helpers. We have to add a
+ // string here only when the copy/dispose helpers aren't generated (which
+ // happens when the block is non-escaping).
+ continue;
+ } else {
+ switch (R.opcode) {
+ case CGObjCCommonMac::BLOCK_LAYOUT_STRONG:
+ Str += "s";
+ break;
+ case CGObjCCommonMac::BLOCK_LAYOUT_BYREF:
+ Str += "r";
+ break;
+ case CGObjCCommonMac::BLOCK_LAYOUT_WEAK:
+ Str += "w";
+ break;
+ default:
+ continue;
+ }
+ }
+ Str += llvm::to_string(R.block_var_bytepos.getQuantity());
+ Str += "l" + llvm::to_string(R.block_var_size.getQuantity());
+ }
+ return Str;
+}
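Worked example (hypothetical capture layout, not taken from the patch's tests): with the copy/dispose helpers not generated, a __strong capture at byte 32 of size 8, a __weak capture at byte 40 of size 8 and an __unsafe_unretained capture at byte 48 of size 8 encode as "s32l8", "w40l8" and "u48l8", giving the combined string "s32l8w40l8u48l8"; if the helpers are generated, only the "u48l8" component survives.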
+
+void CGObjCCommonMac::fillRunSkipBlockVars(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
RunSkipBlockVars.clear();
@@ -2845,9 +2887,22 @@ llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
UpdateRunSkipBlockVars(CI.isByRef(), getBlockCaptureLifetime(type, false),
fieldOffset, fieldSize);
}
+}
+
+llvm::Constant *
+CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ fillRunSkipBlockVars(CGM, blockInfo);
return getBitmapBlockLayout(false);
}
+std::string CGObjCCommonMac::getRCBlockLayoutStr(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ fillRunSkipBlockVars(CGM, blockInfo);
+ return getBlockLayoutInfoString(RunSkipBlockVars,
+ blockInfo.needsCopyDisposeHelpers());
+}
+
llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
QualType T) {
assert(CGM.getLangOpts().getGC() == LangOptions::NonGC);
@@ -6783,8 +6838,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
return Entry;
// Use the protocol definition, if there is one.
- if (const ObjCProtocolDecl *Def = PD->getDefinition())
- PD = Def;
+ assert(PD->hasDefinition() &&
+ "emitting protocol metadata without definition");
+ PD = PD->getDefinition();
auto methodLists = ProtocolMethodLists::get(PD);
@@ -7132,15 +7188,21 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
Weak ? llvm::GlobalValue::ExternalWeakLinkage
: llvm::GlobalValue::ExternalLinkage;
-
-
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV) {
- GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
- false, L, nullptr, Name);
+ if (!GV || GV->getType() != ObjCTypes.ClassnfABITy->getPointerTo()) {
+ auto *NewGV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false, L,
+ nullptr, Name);
if (DLLImport)
- GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ NewGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+
+ if (GV) {
+ GV->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(NewGV, GV->getType()));
+ GV->eraseFromParent();
+ }
+ GV = NewGV;
+ CGM.getModule().getGlobalList().push_back(GV);
}
assert(GV->getLinkage() == L);
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index a43885c0f9a2..4b6f24a03f27 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGObjCRuntime.h"
#include "CGCleanup.h"
+#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -22,6 +23,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/CallSite.h"
+#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
@@ -120,6 +122,8 @@ namespace {
const Stmt *Body;
llvm::BasicBlock *Block;
llvm::Constant *TypeInfo;
+ /// Flags used to differentiate cleanups and catchalls in Windows SEH
+ unsigned Flags;
};
struct CallObjCEndCatch final : EHScopeStack::Cleanup {
@@ -148,13 +152,17 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
if (S.getNumCatchStmts())
Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+ bool useFunclets = EHPersonality::get(CGF).usesFuncletPads();
+
CodeGenFunction::FinallyInfo FinallyInfo;
- if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
- FinallyInfo.enter(CGF, Finally->getFinallyBody(),
- beginCatchFn, endCatchFn, exceptionRethrowFn);
+ if (!useFunclets)
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
SmallVector<CatchHandler, 8> Handlers;
+
// Enter the catch, if there is one.
if (S.getNumCatchStmts()) {
for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
@@ -166,10 +174,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
Handler.Variable = CatchDecl;
Handler.Body = CatchStmt->getCatchBody();
Handler.Block = CGF.createBasicBlock("catch");
+ Handler.Flags = 0;
// @catch(...) always matches.
if (!CatchDecl) {
- Handler.TypeInfo = nullptr; // catch-all
+ auto catchAll = getCatchAllTypeInfo();
+ Handler.TypeInfo = catchAll.RTTI;
+ Handler.Flags = catchAll.Flags;
// Don't consider any other catches.
break;
}
@@ -179,9 +190,31 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
- Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ Catch->setHandler(I, { Handlers[I].TypeInfo, Handlers[I].Flags }, Handlers[I].Block);
}
+ if (useFunclets)
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
+ CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
+ if (!CGF.CurSEHParent)
+ CGF.CurSEHParent = cast<NamedDecl>(CGF.CurFuncDecl);
+ // Outline the finally block.
+ const Stmt *FinallyBlock = Finally->getFinallyBody();
+ HelperCGF.startOutlinedSEHHelper(CGF, /*isFilter*/false, FinallyBlock);
+
+ // Emit the outlined finally body.
+ HelperCGF.EmitStmt(FinallyBlock);
+
+ HelperCGF.FinishFunction(FinallyBlock->getEndLoc());
+
+ llvm::Function *FinallyFunc = HelperCGF.CurFn;
+
+ // Push a cleanup for __finally blocks.
+ CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
+ }
+
// Emit the try body.
CGF.EmitStmt(S.getTryBody());
@@ -197,6 +230,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CatchHandler &Handler = Handlers[I];
CGF.EmitBlock(Handler.Block);
+ llvm::CatchPadInst *CPI = nullptr;
+ SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(CGF.CurrentFuncletPad);
+ if (useFunclets)
+ if ((CPI = dyn_cast_or_null<llvm::CatchPadInst>(Handler.Block->getFirstNonPHI()))) {
+ CGF.CurrentFuncletPad = CPI;
+ CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
+ }
llvm::Value *RawExn = CGF.getExceptionFromSlot();
// Enter the catch.
@@ -223,6 +263,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitAutoVarDecl(*CatchParam);
EmitInitOfCatchParam(CGF, CastExn, CatchParam);
}
+ if (CPI)
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.ObjCEHValueStack.push_back(Exn);
CGF.EmitStmt(Handler.Body);
@@ -232,13 +274,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
- }
+ }
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
// Pop out of the finally.
- if (S.getFinallyStmt())
+ if (!useFunclets && S.getFinallyStmt())
FinallyInfo.exit(CGF);
if (Cont.isValid())
@@ -254,7 +296,7 @@ void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
exn = CGF.EmitARCRetainNonBlock(exn);
- // fallthrough
+ LLVM_FALLTHROUGH;
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -277,7 +319,7 @@ namespace {
: SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ CGF.EmitNounwindRuntimeCall(SyncExitFn, SyncArg);
}
};
}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index ce082a61eb5e..fa16c198adbc 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -17,6 +17,7 @@
#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
#include "CGBuilder.h"
#include "CGCall.h"
+#include "CGCleanup.h"
#include "CGValue.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h" // Selector
@@ -141,6 +142,8 @@ public:
/// error to Sema.
virtual llvm::Constant *GetEHType(QualType T) = 0;
+ virtual CatchTypeInfo getCatchAllTypeInfo() { return { nullptr, 0 }; }
+
/// Generate a constant string object.
virtual ConstantAddress GenerateConstantString(const StringLiteral *) = 0;
@@ -275,6 +278,10 @@ public:
const CodeGen::CGBlockInfo &blockInfo) = 0;
virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual std::string getRCBlockLayoutStr(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return {};
+ }
/// Returns an i8* which points to the byref layout information.
virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 1da19a90c387..7f6f595dd5d1 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -62,6 +62,11 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
case BuiltinType::OCLReserveID:
return llvm::PointerType::get(
llvm::StructType::create(Ctx, "opencl.reserve_id_t"), AddrSpc);
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return llvm::PointerType::get( \
+ llvm::StructType::create(Ctx, "opencl." #ExtType), AddrSpc);
+#include "clang/Basic/OpenCLExtensionTypes.def"
}
}
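
The EXT_OPAQUE_TYPE expansion above follows the usual .def-file X-macro pattern: OpenCLExtensionTypes.def expands a caller-supplied macro once per extension type. A self-contained sketch of the same technique, using a hypothetical in-file list instead of the real .def file:

#include <iostream>
#include <string>

// Hypothetical stand-in for a .def file: expands X once per opaque type.
#define SKETCH_OPAQUE_TYPES(X)                                                 \
  X(my_ext_image_t, OCLMyExtImage)                                             \
  X(my_ext_queue_t, OCLMyExtQueue)

enum class BuiltinKind {
#define MAKE_ENUM(Spelling, Id) Id,
  SKETCH_OPAQUE_TYPES(MAKE_ENUM)
#undef MAKE_ENUM
};

static std::string structNameFor(BuiltinKind K) {
  switch (K) {
#define MAKE_CASE(Spelling, Id)                                                \
  case BuiltinKind::Id:                                                        \
    return "opencl." #Spelling;
    SKETCH_OPAQUE_TYPES(MAKE_CASE)
#undef MAKE_CASE
  }
  return "";
}

int main() {
  std::cout << structNameFor(BuiltinKind::OCLMyExtQueue) << "\n";
  // prints "opencl.my_ext_queue_t"
}
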
@@ -118,25 +123,6 @@ llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() {
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
}
-// Get the block literal from an expression derived from the block expression.
-// OpenCL v2.0 s6.12.5:
-// Block variable declarations are implicitly qualified with const. Therefore
-// all block variables must be initialized at declaration time and may not be
-// reassigned.
-static const BlockExpr *getBlockExpr(const Expr *E) {
- if (auto Cast = dyn_cast<CastExpr>(E)) {
- E = Cast->getSubExpr();
- }
- if (auto DR = dyn_cast<DeclRefExpr>(E)) {
- E = cast<VarDecl>(DR->getDecl())->getInit();
- }
- E = E->IgnoreImplicit();
- if (auto Cast = dyn_cast<CastExpr>(E)) {
- E = Cast->getSubExpr();
- }
- return cast<BlockExpr>(E);
-}
-
/// Record emitted llvm invoke function and llvm block literal for the
/// corresponding block expression.
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
@@ -151,15 +137,21 @@ void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
EnqueuedBlockMap[E].Kernel = nullptr;
}
-llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
- return EnqueuedBlockMap[getBlockExpr(E)].InvokeFunc;
-}
-
CGOpenCLRuntime::EnqueuedBlockInfo
CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
CGF.EmitScalarExpr(E);
- const BlockExpr *Block = getBlockExpr(E);
+ // The block literal may be assigned to a const variable. Chase through the
+ // declaration and any casts to get the underlying block literal.
+ if (auto DR = dyn_cast<DeclRefExpr>(E)) {
+ E = cast<VarDecl>(DR->getDecl())->getInit();
+ }
+ E = E->IgnoreImplicit();
+ if (auto Cast = dyn_cast<CastExpr>(E)) {
+ E = Cast->getSubExpr();
+ }
+ auto *Block = cast<BlockExpr>(E);
+
assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
"Block expression not emitted");
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index a513340827a8..750721f1b80f 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
+#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Type.h"
@@ -91,10 +92,6 @@ public:
/// \param Block block literal emitted for the block expression.
void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
llvm::Value *Block);
-
- /// \return LLVM block invoke function emitted for an expression derived from
- /// the block expression.
- llvm::Function *getInvokeFunction(const Expr *E);
};
}
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index fa850155df4f..20eb0b29f427 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -353,7 +353,7 @@ public:
if (VD->isLocalVarDeclOrParm())
continue;
- DeclRefExpr DRE(const_cast<VarDecl *>(VD),
+ DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
@@ -673,6 +673,9 @@ enum OpenMPRTLFunction {
//
// Offloading related calls
//
+ // Call to void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
+ // size);
+ OMPRTL__kmpc_push_target_tripcount,
// Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
// *arg_types);
@@ -897,25 +900,6 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
-static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
-isDeclareTargetDeclaration(const ValueDecl *VD) {
- for (const Decl *D : VD->redecls()) {
- if (!D->hasAttrs())
- continue;
- if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getMapType();
- }
- if (const auto *V = dyn_cast<VarDecl>(VD)) {
- if (const VarDecl *TD = V->getTemplateInstantiationPattern())
- return isDeclareTargetDeclaration(TD);
- } else if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
- if (const auto *TD = FD->getTemplateInstantiationPattern())
- return isDeclareTargetDeclaration(TD);
- }
-
- return llvm::None;
-}
-
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
return CGF.EmitOMPSharedLValue(E);
}
@@ -1242,6 +1226,17 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
void CGOpenMPRuntime::clear() {
InternalVars.clear();
+ // Clean up non-target variable declarations possibly used only in debug info.
+ for (const auto &Data : EmittedNonTargetVariables) {
+ if (!Data.getValue().pointsToAliveValue())
+ continue;
+ auto *GV = dyn_cast<llvm::GlobalVariable>(Data.getValue());
+ if (!GV)
+ continue;
+ if (!GV->isDeclaration() || GV->getNumUses() > 0)
+ continue;
+ GV->eraseFromParent();
+ }
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
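
The new cleanup in clear() drops globals that were registered during device codegen but ended up as unused declarations (typically kept alive only by debug info). A minimal sketch of that erase-if-unused-declaration step against the LLVM C++ API (the module and variable names are illustrative):

#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("sketch", Ctx);
  // A declaration-only global, e.g. one referenced solely from debug info.
  auto *GV = new llvm::GlobalVariable(
      M, llvm::Type::getInt32Ty(Ctx), /*isConstant=*/false,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      "unused_decl");
  // Mirror of the cleanup above: drop declarations that ended up unused.
  if (GV->isDeclaration() && GV->use_empty())
    GV->eraseFromParent();
  return M.global_empty() ? 0 : 1;
}
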
@@ -1314,27 +1309,19 @@ void CGOpenMPRuntime::emitUserDefinedReduction(
CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
if (UDRMap.count(D) > 0)
return;
- ASTContext &C = CGM.getContext();
- if (!In || !Out) {
- In = &C.Idents.get("omp_in");
- Out = &C.Idents.get("omp_out");
- }
llvm::Function *Combiner = emitCombinerOrInitializer(
- CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
- cast<VarDecl>(D->lookup(Out).front()),
+ CGM, D->getType(), D->getCombiner(),
+ cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerIn())->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(D->getCombinerOut())->getDecl()),
/*IsCombiner=*/true);
llvm::Function *Initializer = nullptr;
if (const Expr *Init = D->getInitializer()) {
- if (!Priv || !Orig) {
- Priv = &C.Idents.get("omp_priv");
- Orig = &C.Idents.get("omp_orig");
- }
Initializer = emitCombinerOrInitializer(
CGM, D->getType(),
D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
: nullptr,
- cast<VarDecl>(D->lookup(Orig).front()),
- cast<VarDecl>(D->lookup(Priv).front()),
+ cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
/*IsCombiner=*/false);
}
UDRMap.try_emplace(D, Combiner, Initializer);
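
emitUserDefinedReduction now pulls the combiner and initializer variables straight from the OMPDeclareReductionDecl's omp_in/omp_out and omp_orig/omp_priv DeclRefExprs instead of redoing identifier lookup. A hypothetical declare-reduction that exercises those special variables (type and identifier names here are illustrative only):

// The omp_in/omp_out pair drives the combiner; omp_priv is set up by the
// initializer clause.
struct Acc { double sum; };

#pragma omp declare reduction(merge : Acc : omp_out.sum += omp_in.sum) \
    initializer(omp_priv = Acc{0.0})

double total(int n, const double *x) {
  Acc acc{0.0};
  #pragma omp parallel for reduction(merge : acc)
  for (int i = 0; i < n; ++i)
    acc.sum += x[i];
  return acc.sum;
}
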
@@ -1406,8 +1393,8 @@ llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
bool Tied, unsigned &NumberOfParts) {
auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
PrePostActionTy &) {
- llvm::Value *ThreadID = getThreadID(CGF, D.getLocStart());
- llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
+ llvm::Value *ThreadID = getThreadID(CGF, D.getBeginLoc());
+ llvm::Value *UpLoc = emitUpdateLocation(CGF, D.getBeginLoc());
llvm::Value *TaskArgs[] = {
UpLoc, ThreadID,
CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
@@ -1456,17 +1443,17 @@ static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
template <class... As>
static llvm::GlobalVariable *
-createConstantGlobalStruct(CodeGenModule &CGM, QualType Ty,
- ArrayRef<llvm::Constant *> Data, const Twine &Name,
- As &&... Args) {
+createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
+ ArrayRef<llvm::Constant *> Data, const Twine &Name,
+ As &&... Args) {
const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
ConstantInitBuilder CIBuilder(CGM);
ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
buildStructValue(Fields, CGM, RD, RL, Data);
return Fields.finishAndCreateGlobal(
- Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty),
- /*isConstant=*/true, std::forward<As>(Args)...);
+ Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
+ std::forward<As>(Args)...);
}
template <typename T>
@@ -1483,7 +1470,9 @@ createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
CharUnits Align = CGM.getContext().getTypeAlignInChars(IdentQTy);
- llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
+ unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
+ FlagsTy FlagsKey(Flags, Reserved2Flags);
+ llvm::Value *Entry = OpenMPDefaultLocMap.lookup(FlagsKey);
if (!Entry) {
if (!DefaultOpenMPPSource) {
// Initialize default location for psource field of ident_t structure of
@@ -1496,21 +1485,47 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- llvm::Constant *Data[] = {llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- llvm::ConstantInt::getNullValue(CGM.Int32Ty),
- DefaultOpenMPPSource};
- llvm::GlobalValue *DefaultOpenMPLocation = createConstantGlobalStruct(
- CGM, IdentQTy, Data, "", llvm::GlobalValue::PrivateLinkage);
+ llvm::Constant *Data[] = {
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty),
+ llvm::ConstantInt::get(CGM.Int32Ty, Flags),
+ llvm::ConstantInt::get(CGM.Int32Ty, Reserved2Flags),
+ llvm::ConstantInt::getNullValue(CGM.Int32Ty), DefaultOpenMPPSource};
+ llvm::GlobalValue *DefaultOpenMPLocation =
+ createGlobalStruct(CGM, IdentQTy, isDefaultLocationConstant(), Data, "",
+ llvm::GlobalValue::PrivateLinkage);
DefaultOpenMPLocation->setUnnamedAddr(
llvm::GlobalValue::UnnamedAddr::Global);
- OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
+ OpenMPDefaultLocMap[FlagsKey] = Entry = DefaultOpenMPLocation;
}
return Address(Entry, Align);
}
+void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
+ bool AtCurrentPoint) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ assert(!Elem.second.ServiceInsertPt && "Insert point is set already.");
+
+ llvm::Value *Undef = llvm::UndefValue::get(CGF.Int32Ty);
+ if (AtCurrentPoint) {
+ Elem.second.ServiceInsertPt = new llvm::BitCastInst(
+ Undef, CGF.Int32Ty, "svcpt", CGF.Builder.GetInsertBlock());
+ } else {
+ Elem.second.ServiceInsertPt =
+ new llvm::BitCastInst(Undef, CGF.Int32Ty, "svcpt");
+ Elem.second.ServiceInsertPt->insertAfter(CGF.AllocaInsertPt);
+ }
+}
+
+void CGOpenMPRuntime::clearLocThreadIdInsertPt(CodeGenFunction &CGF) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ if (Elem.second.ServiceInsertPt) {
+ llvm::Instruction *Ptr = Elem.second.ServiceInsertPt;
+ Elem.second.ServiceInsertPt = nullptr;
+ Ptr->eraseFromParent();
+ }
+}
+
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned Flags) {
@@ -1537,8 +1552,10 @@ llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
Elem.second.DebugLoc = AI.getPointer();
LocValue = AI;
+ if (!Elem.second.ServiceInsertPt)
+ setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
CGF.getTypeSize(IdentQTy));
}
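
emitUpdateLocation and getThreadID now emit their function-wide setup at a dedicated service insert point rather than directly at AllocaInsertPt, so clearLocThreadIdInsertPt can drop the marker when the function is finished. A minimal sketch of the marker idea using the LLVM C++ API (function and value names are illustrative):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("sketch", Ctx);
  auto *FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
  auto *Fn = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage,
                                    "f", &M);
  auto *BB = llvm::BasicBlock::Create(Ctx, "entry", Fn);
  llvm::IRBuilder<> B(BB);

  // A dead bitcast of undef acts as a stable insertion marker ("svcpt").
  llvm::Value *Undef = llvm::UndefValue::get(B.getInt32Ty());
  auto *ServicePt = new llvm::BitCastInst(Undef, B.getInt32Ty(), "svcpt", BB);

  // Function-wide runtime setup is emitted right before the marker...
  B.SetInsertPoint(ServicePt);
  B.CreateAlloca(B.getInt32Ty(), nullptr, "thread.id.addr");

  // ...and the marker is erased once the function body is complete.
  B.SetInsertPoint(BB);
  ServicePt->eraseFromParent();
  B.CreateRetVoid();
  return 0;
}
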
@@ -1608,21 +1625,25 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
// kmpc_global_thread_num(ident_t *loc).
// Generate thread id value and cache this value for use across the
// function.
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ if (!Elem.second.ServiceInsertPt)
+ setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
+ CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
llvm::CallInst *Call = CGF.Builder.CreateCall(
createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
Call->setCallingConv(CGF.getRuntimeCC());
- auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = Call;
return Call;
}
void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- if (OpenMPLocThreadIDMap.count(CGF.CurFn))
+ if (OpenMPLocThreadIDMap.count(CGF.CurFn)) {
+ clearLocThreadIdInsertPt(CGF);
OpenMPLocThreadIDMap.erase(CGF.CurFn);
+ }
if (FunctionUDRMap.count(CGF.CurFn) > 0) {
for(auto *D : FunctionUDRMap[CGF.CurFn])
UDRMap.erase(D);
@@ -2145,6 +2166,15 @@ CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
break;
}
+ case OMPRTL__kmpc_push_target_tripcount: {
+ // Build void __kmpc_push_target_tripcount(int64_t device_id, kmp_uint64
+ // size);
+ llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int64Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_target_tripcount");
+ break;
+ }
case OMPRTL__tgt_target: {
// Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
// arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
@@ -2417,7 +2447,7 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
if (CGM.getLangOpts().OpenMPSimd)
return Address::invalid();
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD);
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
SmallString<64> PtrName;
{
@@ -2496,8 +2526,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
return nullptr;
VD = VD->getDefinition(CGM.getContext());
- if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
- ThreadPrivateWithDefinition.insert(VD);
+ if (VD && ThreadPrivateWithDefinition.insert(CGM.getMangledName(VD)).second) {
QualType ASTTy = VD->getType();
llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
@@ -2639,16 +2668,16 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit) {
Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD);
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
- return false;
+ return CGM.getLangOpts().OpenMPIsDevice;
VD = VD->getDefinition(CGM.getContext());
- if (VD && !DeclareTargetWithDefinition.insert(VD).second)
+ if (VD && !DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
return CGM.getLangOpts().OpenMPIsDevice;
QualType ASTTy = VD->getType();
- SourceLocation Loc = VD->getCanonicalDecl()->getLocStart();
+ SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
// Produce the unique prefix to identify the new target regions. We use
// the source location of the variable declaration which we know to not
// conflict with any target region.
@@ -3197,13 +3226,7 @@ void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
-void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
- OpenMPDirectiveKind Kind, bool EmitChecks,
- bool ForceSimpleCall) {
- if (!CGF.HaveInsertPoint())
- return;
- // Build call __kmpc_cancel_barrier(loc, thread_id);
- // Build call __kmpc_barrier(loc, thread_id);
+unsigned CGOpenMPRuntime::getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind) {
unsigned Flags;
if (Kind == OMPD_for)
Flags = OMP_IDENT_BARRIER_IMPL_FOR;
@@ -3215,6 +3238,17 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
Flags = OMP_IDENT_BARRIER_EXPL;
else
Flags = OMP_IDENT_BARRIER_IMPL;
+ return Flags;
+}
+
+void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks,
+ bool ForceSimpleCall) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ // Build call __kmpc_barrier(loc, thread_id);
+ unsigned Flags = getDefaultFlagsForBarriers(Kind);
// Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
// thread_id);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
@@ -3287,6 +3321,18 @@ bool CGOpenMPRuntime::isStaticNonchunked(
return Schedule == OMP_dist_sch_static;
}
+bool CGOpenMPRuntime::isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const {
+ OpenMPSchedType Schedule =
+ getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
+ return Schedule == OMP_sch_static_chunked;
+}
+
+bool CGOpenMPRuntime::isStaticChunked(
+ OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const {
+ OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
+ return Schedule == OMP_dist_sch_static_chunked;
+}
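
The new isStaticChunked overloads classify OMP_sch_static_chunked and OMP_dist_sch_static_chunked schedules. Hypothetical loops that would take those paths (chunk sizes and clauses chosen only for illustration):

void work(int n, int *a) {
  // schedule(static, <chunk>) maps to OMP_sch_static_chunked.
  #pragma omp parallel for schedule(static, 4)
  for (int i = 0; i < n; ++i)
    a[i] += 1;

  // dist_schedule(static, <chunk>) maps to OMP_dist_sch_static_chunked.
  #pragma omp target teams distribute dist_schedule(static, 8) \
      map(tofrom: a[0:n])
  for (int i = 0; i < n; ++i)
    a[i] += 2;
}
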
bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
OpenMPSchedType Schedule =
@@ -3784,8 +3830,8 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
DeviceImages, Index),
HostEntriesBegin, HostEntriesEnd};
std::string Descriptor = getName({"omp_offloading", "descriptor"});
- llvm::GlobalVariable *Desc = createConstantGlobalStruct(
- CGM, getTgtBinaryDescriptorQTy(), Data, Descriptor);
+ llvm::GlobalVariable *Desc = createGlobalStruct(
+ CGM, getTgtBinaryDescriptorQTy(), /*IsConstant=*/true, Data, Descriptor);
// Emit code to register or unregister the descriptor at execution
// startup or closing, respectively.
@@ -3818,7 +3864,19 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
CGF.disableDebugInfo();
const auto &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- std::string Descriptor = getName({"omp_offloading", "descriptor_reg"});
+
+ // Encode offload target triples into the registration function name. It
+ // will serve as a comdat key for the registration/unregistration code for
+ // this particular combination of offloading targets.
+ SmallVector<StringRef, 4U> RegFnNameParts(Devices.size() + 2U);
+ RegFnNameParts[0] = "omp_offloading";
+ RegFnNameParts[1] = "descriptor_reg";
+ llvm::transform(Devices, std::next(RegFnNameParts.begin(), 2),
+ [](const llvm::Triple &T) -> const std::string& {
+ return T.getTriple();
+ });
+ llvm::sort(std::next(RegFnNameParts.begin(), 2), RegFnNameParts.end());
+ std::string Descriptor = getName(RegFnNameParts);
RegFn = CGM.CreateGlobalInitOrDestructFunction(FTy, Descriptor, FI);
CGF.StartFunction(GlobalDecl(), C.VoidTy, RegFn, FI, FunctionArgList());
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib), Desc);
@@ -3868,9 +3926,9 @@ void CGOpenMPRuntime::createOffloadEntry(
llvm::ConstantInt::get(CGM.Int32Ty, Flags),
llvm::ConstantInt::get(CGM.Int32Ty, 0)};
std::string EntryName = getName({"omp_offloading", "entry", ""});
- llvm::GlobalVariable *Entry = createConstantGlobalStruct(
- CGM, getTgtOffloadEntryQTy(), Data, Twine(EntryName).concat(Name),
- llvm::GlobalValue::WeakAnyLinkage);
+ llvm::GlobalVariable *Entry = createGlobalStruct(
+ CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
+ Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
// The entry has to be created in the section the linker expects it to be.
std::string Section = getName({"omp_offloading", "entries"});
@@ -3895,6 +3953,8 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
llvm::LLVMContext &C = M.getContext();
SmallVector<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
OrderedEntries(OffloadEntriesInfoManager.size());
+ llvm::SmallVector<StringRef, 16> ParentFunctions(
+ OffloadEntriesInfoManager.size());
// Auxiliary methods to create metadata values and strings.
auto &&GetMDInt = [this](unsigned V) {
@@ -3909,7 +3969,7 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Create function that emits metadata for each target region entry;
auto &&TargetRegionMetadataEmitter =
- [&C, MD, &OrderedEntries, &GetMDInt, &GetMDString](
+ [&C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt, &GetMDString](
unsigned DeviceID, unsigned FileID, StringRef ParentName,
unsigned Line,
const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
@@ -3929,6 +3989,7 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Save this entry in the right position of the ordered entries array.
OrderedEntries[E.getOrder()] = &E;
+ ParentFunctions[E.getOrder()] = ParentName;
// Add metadata to the named metadata node.
MD->addOperand(llvm::MDNode::get(C, Ops));
@@ -3970,6 +4031,10 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
E)) {
if (!CE->getID() || !CE->getAddress()) {
+ // Do not blame the entry if the parent function is not emitted.
+ StringRef FnName = ParentFunctions[CE->getOrder()];
+ if (!CGM.GetGlobalValue(FnName))
+ continue;
unsigned DiagID = CGM.getDiags().getCustomDiagID(
DiagnosticsEngine::Error,
"Offloading entry for target region is incorrect: either the "
@@ -3995,6 +4060,9 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
CGM.getDiags().Report(DiagID);
continue;
}
+ // The variable has no definition - no need to add the entry.
+ if (CE->getVarSize().isZero())
+ continue;
break;
}
case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
@@ -5226,8 +5294,8 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
LBLVal.getPointer(),
UBLVal.getPointer(),
CGF.EmitLoadOfScalar(StLVal, Loc),
- llvm::ConstantInt::getNullValue(
- CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
+ llvm::ConstantInt::getSigned(
+ CGF.IntTy, 1), // Always 1 because the taskgroup is emitted by the compiler
llvm::ConstantInt::getSigned(
CGF.IntTy, Data.Schedule.getPointer()
? Data.Schedule.getInt() ? NumTasks : Grainsize
@@ -5776,7 +5844,7 @@ static std::string generateUniqueName(CodeGenModule &CGM, StringRef Prefix,
std::string Name = CGM.getOpenMPRuntime().getName(
{D->isLocalVarDeclOrParm() ? D->getName() : CGM.getMangledName(D)});
Out << Prefix << Name << "_"
- << D->getCanonicalDecl()->getLocStart().getRawEncoding();
+ << D->getCanonicalDecl()->getBeginLoc().getRawEncoding();
return Out.str();
}
@@ -6274,7 +6342,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
unsigned DeviceID;
unsigned FileID;
unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
+ getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
Line);
SmallString<64> EntryFnName;
{
@@ -6589,17 +6657,17 @@ private:
struct MapInfo {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
- OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
+ ArrayRef<OpenMPMapModifierKind> MapModifiers;
bool ReturnDevicePointer = false;
bool IsImplicit = false;
MapInfo() = default;
MapInfo(
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
bool ReturnDevicePointer, bool IsImplicit)
- : Components(Components), MapType(MapType),
- MapTypeModifier(MapTypeModifier),
+ : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
};
@@ -6676,10 +6744,9 @@ private:
/// a flag marking the map as a pointer if requested. Add a flag marking the
/// map as the first one of a series of maps that relate to the same map
/// expression.
- OpenMPOffloadMappingFlags getMapTypeBits(OpenMPMapClauseKind MapType,
- OpenMPMapClauseKind MapTypeModifier,
- bool IsImplicit, bool AddPtrFlag,
- bool AddIsTargetParamFlag) const {
+ OpenMPOffloadMappingFlags getMapTypeBits(
+ OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ bool IsImplicit, bool AddPtrFlag, bool AddIsTargetParamFlag) const {
OpenMPOffloadMappingFlags Bits =
IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
switch (MapType) {
@@ -6702,7 +6769,6 @@ private:
case OMPC_MAP_delete:
Bits |= OMP_MAP_DELETE;
break;
- case OMPC_MAP_always:
case OMPC_MAP_unknown:
llvm_unreachable("Unexpected map type!");
}
@@ -6710,7 +6776,8 @@ private:
Bits |= OMP_MAP_PTR_AND_OBJ;
if (AddIsTargetParamFlag)
Bits |= OMP_MAP_TARGET_PARAM;
- if (MapTypeModifier == OMPC_MAP_always)
+ if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_always)
+ != MapModifiers.end())
Bits |= OMP_MAP_ALWAYS;
return Bits;
}
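
With the map-type modifier now carried as a list, getMapTypeBits sets OMP_MAP_ALWAYS whenever that list contains the always modifier. A hypothetical directive using the modifier (array section and bounds are illustrative):

void update(int n, double *buf) {
  // The 'always' modifier forces the transfer even if buf is already mapped.
  #pragma omp target map(always, tofrom: buf[0:n])
  for (int i = 0; i < n; ++i)
    buf[i] *= 2.0;
}
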
@@ -6746,10 +6813,11 @@ private:
}
// Check if the length evaluates to 1.
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, CGF.getContext()))
return true; // Can have more than size 1.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return ConstLength.getSExtValue() != 1;
}
@@ -6758,12 +6826,15 @@ private:
/// \a IsFirstComponent should be set to true if the provided set of
/// components is the first associated with a capture.
void generateInfoForComponentList(
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
- bool IsImplicit) const {
+ bool IsImplicit,
+ ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
+ OverlappedElements = llvm::None) const {
// The following summarizes what has to be generated for each map and the
// types below. The generated information is expressed in this order:
// base pointer, section pointer, size, flags
@@ -6933,19 +7004,26 @@ private:
// components.
bool IsExpressionFirstInfo = true;
Address BP = Address::invalid();
+ const Expr *AssocExpr = I->getAssociatedExpression();
+ const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(AssocExpr);
- if (isa<MemberExpr>(I->getAssociatedExpression())) {
+ if (isa<MemberExpr>(AssocExpr)) {
// The base is the 'this' pointer. The content of the pointer is going
// to be the base of the field being mapped.
BP = CGF.LoadCXXThisAddress();
+ } else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
+ (OASE &&
+ isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
} else {
// The base is the reference to the variable.
// BP = &Var.
- BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD))
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
IsLink = true;
BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
@@ -7034,7 +7112,6 @@ private:
Address LB =
CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getAddress();
- llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
// If this component is a pointer inside the base struct then we don't
// need to create any entry for it - it will be combined with the object
@@ -7043,6 +7120,70 @@ private:
IsPointer && EncounteredME &&
(dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
EncounteredME);
+ if (!OverlappedElements.empty()) {
+ // Handle base element with the info for overlapped elements.
+ assert(!PartialStruct.Base.isValid() && "The base element is set.");
+ assert(Next == CE &&
+ "Expected last element for the overlapped elements.");
+ assert(!IsPointer &&
+ "Unexpected base element with the pointer type.");
+ // Mark the whole struct as the struct that requires allocation on the
+ // device.
+ PartialStruct.LowestElem = {0, LB};
+ CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
+ I->getAssociatedExpression()->getType());
+ Address HB = CGF.Builder.CreateConstGEP(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LB,
+ CGF.VoidPtrTy),
+ TypeSize.getQuantity() - 1, CharUnits::One());
+ PartialStruct.HighestElem = {
+ std::numeric_limits<decltype(
+ PartialStruct.HighestElem.first)>::max(),
+ HB};
+ PartialStruct.Base = BP;
+ // Emit entries for the non-overlapped data.
+ OpenMPOffloadMappingFlags Flags =
+ OMP_MAP_MEMBER_OF |
+ getMapTypeBits(MapType, MapModifiers, IsImplicit,
+ /*AddPtrFlag=*/false,
+ /*AddIsTargetParamFlag=*/false);
+ LB = BP;
+ llvm::Value *Size = nullptr;
+ // Do bitcopy of all non-overlapped structure elements.
+ for (OMPClauseMappableExprCommon::MappableExprComponentListRef
+ Component : OverlappedElements) {
+ Address ComponentLB = Address::invalid();
+ for (const OMPClauseMappableExprCommon::MappableComponent &MC :
+ Component) {
+ if (MC.getAssociatedDeclaration()) {
+ ComponentLB =
+ CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
+ .getAddress();
+ Size = CGF.Builder.CreatePtrDiff(
+ CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
+ CGF.EmitCastToVoidPtr(LB.getPointer()));
+ break;
+ }
+ }
+ BasePointers.push_back(BP.getPointer());
+ Pointers.push_back(LB.getPointer());
+ Sizes.push_back(Size);
+ Types.push_back(Flags);
+ LB = CGF.Builder.CreateConstGEP(ComponentLB, 1,
+ CGF.getPointerSize());
+ }
+ BasePointers.push_back(BP.getPointer());
+ Pointers.push_back(LB.getPointer());
+ Size = CGF.Builder.CreatePtrDiff(
+ CGF.EmitCastToVoidPtr(
+ CGF.Builder.CreateConstGEP(HB, 1, CharUnits::One())
+ .getPointer()),
+ CGF.EmitCastToVoidPtr(LB.getPointer()));
+ Sizes.push_back(Size);
+ Types.push_back(Flags);
+ break;
+ }
+ llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
if (!IsMemberPointer) {
BasePointers.push_back(BP.getPointer());
Pointers.push_back(LB.getPointer());
@@ -7053,7 +7194,7 @@ private:
// this map is the first one that relates with the current capture
// (there is a set of entries for each capture).
OpenMPOffloadMappingFlags Flags = getMapTypeBits(
- MapType, MapTypeModifier, IsImplicit,
+ MapType, MapModifiers, IsImplicit,
!IsExpressionFirstInfo || IsLink, IsCaptureFirstInfo && !IsLink);
if (!IsExpressionFirstInfo) {
@@ -7147,6 +7288,66 @@ private:
Flags |= MemberOfFlag;
}
+ void getPlainLayout(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<const FieldDecl *> &Layout,
+ bool AsBase) const {
+ const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
+
+ llvm::StructType *St =
+ AsBase ? RL.getBaseSubobjectLLVMType() : RL.getLLVMType();
+
+ unsigned NumElements = St->getNumElements();
+ llvm::SmallVector<
+ llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
+ RecordLayout(NumElements);
+
+ // Fill bases.
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual())
+ continue;
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ // Ignore empty bases.
+ if (Base->isEmpty() || CGF.getContext()
+ .getASTRecordLayout(Base)
+ .getNonVirtualSize()
+ .isZero())
+ continue;
+
+ unsigned FieldIndex = RL.getNonVirtualBaseLLVMFieldNo(Base);
+ RecordLayout[FieldIndex] = Base;
+ }
+ // Fill in virtual bases.
+ for (const auto &I : RD->vbases()) {
+ const auto *Base = I.getType()->getAsCXXRecordDecl();
+ // Ignore empty bases.
+ if (Base->isEmpty())
+ continue;
+ unsigned FieldIndex = RL.getVirtualBaseIndex(Base);
+ if (RecordLayout[FieldIndex])
+ continue;
+ RecordLayout[FieldIndex] = Base;
+ }
+ // Fill in all the fields.
+ assert(!RD->isUnion() && "Unexpected union.");
+ for (const auto *Field : RD->fields()) {
+ // Fill in non-bitfields (bitfields are not included in the plain layout).
+ if (!Field->isBitField()) {
+ unsigned FieldIndex = RL.getLLVMFieldNo(Field);
+ RecordLayout[FieldIndex] = Field;
+ }
+ }
+ for (const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
+ &Data : RecordLayout) {
+ if (Data.isNull())
+ continue;
+ if (const auto *Base = Data.dyn_cast<const CXXRecordDecl *>())
+ getPlainLayout(Base, Layout, /*AsBase=*/true);
+ else
+ Layout.push_back(Data.get<const FieldDecl *>());
+ }
+ }
+
public:
MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
: CurDir(Dir), CGF(CGF) {
@@ -7213,28 +7414,29 @@ public:
auto &&InfoGen = [&Info](
const ValueDecl *D,
OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
+ OpenMPMapClauseKind MapType,
+ ArrayRef<OpenMPMapModifierKind> MapModifiers,
bool ReturnDevicePointer, bool IsImplicit) {
const ValueDecl *VD =
D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
+ Info[VD].emplace_back(L, MapType, MapModifiers, ReturnDevicePointer,
IsImplicit);
};
// FIXME: MSVC 2013 seems to require this-> to find member CurDir.
for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
for (const auto &L : C->component_lists()) {
- InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
+ InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifiers(),
/*ReturnDevicePointer=*/false, C->isImplicit());
}
for (const auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
for (const auto &L : C->component_lists()) {
- InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
+ InfoGen(L.first, L.second, OMPC_MAP_to, llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit());
}
for (const auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
for (const auto &L : C->component_lists()) {
- InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
+ InfoGen(L.first, L.second, OMPC_MAP_from, llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit());
}
@@ -7287,7 +7489,7 @@ public:
// Nonetheless, generateInfoForComponentList must be called to take
// the pointer into account for the calculation of the range of the
// partial struct.
- InfoGen(nullptr, L.second, OMPC_MAP_unknown, OMPC_MAP_unknown,
+ InfoGen(nullptr, L.second, OMPC_MAP_unknown, llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit());
DeferredInfo[nullptr].emplace_back(IE, VD);
} else {
@@ -7321,7 +7523,7 @@ public:
unsigned CurrentBasePointersIdx = CurBasePointers.size();
// FIXME: MSVC 2013 seems to require this-> to find the member method.
this->generateInfoForComponentList(
- L.MapType, L.MapTypeModifier, L.Components, CurBasePointers,
+ L.MapType, L.MapModifiers, L.Components, CurBasePointers,
CurPointers, CurSizes, CurTypes, PartialStruct,
IsFirstComponentList, L.IsImplicit);
@@ -7375,6 +7577,82 @@ public:
}
}
+ /// Emit capture info for variables captured by reference in lambdas.
+ void generateInfoForLambdaCaptures(
+ const ValueDecl *VD, llvm::Value *Arg, MapBaseValuesArrayTy &BasePointers,
+ MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
+ MapFlagsArrayTy &Types,
+ llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
+ const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl();
+ if (!RD || !RD->isLambda())
+ return;
+ Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
+ LValue VDLVal = CGF.MakeAddrLValue(
+ VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture = nullptr;
+ RD->getCaptureFields(Captures, ThisCapture);
+ if (ThisCapture) {
+ LValue ThisLVal =
+ CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
+ LValue ThisLValVal = CGF.EmitLValueForField(VDLVal, ThisCapture);
+ LambdaPointers.try_emplace(ThisLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(ThisLVal.getPointer());
+ Pointers.push_back(ThisLValVal.getPointer());
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ }
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() != LCK_ByRef)
+ continue;
+ const VarDecl *VD = LC.getCapturedVar();
+ auto It = Captures.find(VD);
+ assert(It != Captures.end() && "Found lambda capture without field.");
+ LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
+ LValue VarLValVal = CGF.EmitLValueForField(VDLVal, It->second);
+ LambdaPointers.try_emplace(VarLVal.getPointer(), VDLVal.getPointer());
+ BasePointers.push_back(VarLVal.getPointer());
+ Pointers.push_back(VarLValVal.getPointer());
+ Sizes.push_back(CGF.getTypeSize(
+ VD->getType().getCanonicalType().getNonReferenceType()));
+ Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ }
+ }
+
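
generateInfoForLambdaCaptures adds PTR_AND_OBJ | MEMBER_OF entries for the this pointer and the by-reference captures of a lambda used inside a target region. A hypothetical input of that shape (assuming the enclosing construct already maps the data the captures refer to):

void scale(int n, double *v, double factor) {
  // v and factor are captured by reference; the mapping above attaches those
  // captured references to the lambda object on the device.
  auto body = [&](int i) { v[i] *= factor; };
  #pragma omp target teams distribute parallel for map(tofrom: v[0:n])
  for (int i = 0; i < n; ++i)
    body(i);
}
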
+ /// Set correct indices for lambda captures.
+ void adjustMemberOfForLambdaCaptures(
+ const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
+ MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
+ MapFlagsArrayTy &Types) const {
+ for (unsigned I = 0, E = Types.size(); I < E; ++I) {
+ // Set correct member_of idx for all implicit lambda captures.
+ if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
+ OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
+ continue;
+ llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
+ assert(BasePtr && "Unable to find base lambda address.");
+ int TgtIdx = -1;
+ for (unsigned J = I; J > 0; --J) {
+ unsigned Idx = J - 1;
+ if (Pointers[Idx] != BasePtr)
+ continue;
+ TgtIdx = Idx;
+ break;
+ }
+ assert(TgtIdx != -1 && "Unable to find parent lambda.");
+ // All other current entries will be MEMBER_OF the combined entry
+ // (except for PTR_AND_OBJ entries which do not have a placeholder value
+ // 0xFFFF in the MEMBER_OF field).
+ OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
+ setCorrectMemberOfFlag(Types[I], MemberOfFlag);
+ }
+ }
+
/// Generate the base pointers, section pointers, sizes and map types
/// associated to a given capture.
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
@@ -7387,9 +7665,6 @@ public:
"Not expecting to generate map info for a variable array type!");
// We need to know when we are generating information for the first component
- // associated with a capture, because the mapping flags depend on it.
- bool IsFirstComponentList = true;
-
const ValueDecl *VD = Cap->capturesThis()
? nullptr
: Cap->getCapturedVar()->getCanonicalDecl();
@@ -7405,19 +7680,151 @@ public:
return;
}
+ using MapData =
+ std::tuple<OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool>;
+ SmallVector<MapData, 4> DeclComponentLists;
// FIXME: MSVC 2013 seems to require this-> to find member CurDir.
- for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
+ for (const auto *C : this->CurDir.getClausesOfKind<OMPMapClause>()) {
for (const auto &L : C->decl_component_lists(VD)) {
assert(L.first == VD &&
"We got information for the wrong declaration??");
assert(!L.second.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(C->getMapType(), C->getMapTypeModifier(),
- L.second, BasePointers, Pointers, Sizes,
- Types, PartialStruct, IsFirstComponentList,
- C->isImplicit());
- IsFirstComponentList = false;
+ DeclComponentLists.emplace_back(L.second, C->getMapType(),
+ C->getMapTypeModifiers(),
+ C->isImplicit());
+ }
+ }
+
+ // Find overlapping elements (including the offset from the base element).
+ llvm::SmallDenseMap<
+ const MapData *,
+ llvm::SmallVector<
+ OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>,
+ 4>
+ OverlappedData;
+ size_t Count = 0;
+ for (const MapData &L : DeclComponentLists) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
+ OpenMPMapClauseKind MapType;
+ ArrayRef<OpenMPMapModifierKind> MapModifiers;
+ bool IsImplicit;
+ std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ ++Count;
+ for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
+ std::tie(Components1, MapType, MapModifiers, IsImplicit) = L1;
+ auto CI = Components.rbegin();
+ auto CE = Components.rend();
+ auto SI = Components1.rbegin();
+ auto SE = Components1.rend();
+ for (; CI != CE && SI != SE; ++CI, ++SI) {
+ if (CI->getAssociatedExpression()->getStmtClass() !=
+ SI->getAssociatedExpression()->getStmtClass())
+ break;
+ // Are we dealing with different variables/fields?
+ if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
+ break;
+ }
+ // An overlap is found if, for at least one of the lists, we reached the
+ // head of its components list.
+ if (CI == CE || SI == SE) {
+ assert((CI != CE || SI != SE) &&
+ "Unexpected full match of the mapping components.");
+ const MapData &BaseData = CI == CE ? L : L1;
+ OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
+ SI == SE ? Components : Components1;
+ auto &OverlappedElements = OverlappedData.FindAndConstruct(&BaseData);
+ OverlappedElements.getSecond().push_back(SubData);
+ }
}
+ }
+ // Sort the overlapped elements for each item.
+ llvm::SmallVector<const FieldDecl *, 4> Layout;
+ if (!OverlappedData.empty()) {
+ if (const auto *CRD =
+ VD->getType().getCanonicalType()->getAsCXXRecordDecl())
+ getPlainLayout(CRD, Layout, /*AsBase=*/false);
+ else {
+ const auto *RD = VD->getType().getCanonicalType()->getAsRecordDecl();
+ Layout.append(RD->field_begin(), RD->field_end());
+ }
+ }
+ for (auto &Pair : OverlappedData) {
+ llvm::sort(
+ Pair.getSecond(),
+ [&Layout](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef First,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef
+ Second) {
+ auto CI = First.rbegin();
+ auto CE = First.rend();
+ auto SI = Second.rbegin();
+ auto SE = Second.rend();
+ for (; CI != CE && SI != SE; ++CI, ++SI) {
+ if (CI->getAssociatedExpression()->getStmtClass() !=
+ SI->getAssociatedExpression()->getStmtClass())
+ break;
+ // Are we dealing with different variables/fields?
+ if (CI->getAssociatedDeclaration() !=
+ SI->getAssociatedDeclaration())
+ break;
+ }
+
+ // Lists contain the same elements.
+ if (CI == CE && SI == SE)
+ return false;
+
+ // A list with fewer elements is less than a list with more elements.
+ if (CI == CE || SI == SE)
+ return CI == CE;
+
+ const auto *FD1 = cast<FieldDecl>(CI->getAssociatedDeclaration());
+ const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
+ if (FD1->getParent() == FD2->getParent())
+ return FD1->getFieldIndex() < FD2->getFieldIndex();
+ const auto It =
+ llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
+ return FD == FD1 || FD == FD2;
+ });
+ return *It == FD1;
+ });
+ }
+
+ // The mapping flags depend on whether a component list is the first one
+ // associated with a capture. Go through the elements with overlapped
+ // elements first.
+ for (const auto &Pair : OverlappedData) {
+ const MapData &L = *Pair.getFirst();
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
+ OpenMPMapClauseKind MapType;
+ ArrayRef<OpenMPMapModifierKind> MapModifiers;
+ bool IsImplicit;
+ std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
+ OverlappedComponents = Pair.getSecond();
+ bool IsFirstComponentList = true;
+ generateInfoForComponentList(MapType, MapModifiers, Components,
+ BasePointers, Pointers, Sizes, Types,
+ PartialStruct, IsFirstComponentList,
+ IsImplicit, OverlappedComponents);
+ }
+ // Go through other elements without overlapped elements.
+ bool IsFirstComponentList = OverlappedData.empty();
+ for (const MapData &L : DeclComponentLists) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
+ OpenMPMapClauseKind MapType;
+ ArrayRef<OpenMPMapModifierKind> MapModifiers;
+ bool IsImplicit;
+ std::tie(Components, MapType, MapModifiers, IsImplicit) = L;
+ auto It = OverlappedData.find(&L);
+ if (It == OverlappedData.end())
+ generateInfoForComponentList(MapType, MapModifiers, Components,
+ BasePointers, Pointers, Sizes, Types,
+ PartialStruct, IsFirstComponentList,
+ IsImplicit);
+ IsFirstComponentList = false;
+ }
}
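
The overlap detection above handles a whole aggregate and one of its members appearing in different map clauses of the same construct: the non-overlapped pieces are bit-copied separately and the member keeps its own map type. A hypothetical example of such overlapping maps (illustrative only; whether Sema accepts the exact combination depends on the OpenMP version in use):

struct Pair { double x, y; };

void run(Pair &s) {
  // s and s.x overlap: s.x keeps map(to), the rest of s is mapped tofrom.
  #pragma omp target map(tofrom: s) map(to: s.x)
  {
    s.y += s.x;
  }
}
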
/// Generate the base pointers, section pointers, sizes and map types
@@ -7436,12 +7843,12 @@ public:
if (!VD)
continue;
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD);
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
continue;
StructRangeInfoTy PartialStruct;
generateInfoForComponentList(
- C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
+ C->getMapType(), C->getMapTypeModifiers(), L.second, BasePointers,
Pointers, Sizes, Types, PartialStruct,
/*IsFirstComponentList=*/true, C->isImplicit());
assert(!PartialStruct.Base.isValid() &&
@@ -7658,6 +8065,183 @@ static void emitOffloadingArraysArgument(
}
}
+/// Checks if the expression is constant or does not have non-trivial function
+/// calls.
+static bool isTrivial(ASTContext &Ctx, const Expr *E) {
+ // We can skip constant expressions.
+ // We can skip expressions with trivial calls or simple expressions.
+ return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
+ !E->hasNonTrivialCall(Ctx)) &&
+ !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
+}
+
+/// Checks if the \p Body is a \a CompoundStmt and returns its single child
+/// statement iff there is only one that is not evaluatable at compile time.
+static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
+ if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
+ const Stmt *Child = nullptr;
+ for (const Stmt *S : C->body()) {
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ if (isTrivial(Ctx, E))
+ continue;
+ }
+ // Some of the statements can be ignored.
+ if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
+ isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
+ continue;
+ // Analyze declarations.
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
+ if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
+ isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
+ isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
+ isa<UsingDirectiveDecl>(D) ||
+ isa<OMPDeclareReductionDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D))
+ return true;
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ return VD->isConstexpr() ||
+ ((VD->getType().isTrivialType(Ctx) ||
+ VD->getType()->isReferenceType()) &&
+ (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
+ }))
+ continue;
+ }
+ // Found more than one non-trivial child - cannot return a single one.
+ if (Child)
+ return Body;
+ Child = S;
+ }
+ if (Child)
+ return Child;
+ }
+ return Body;
+}
+
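
getSingleCompoundChild is what lets the nested-directive search below ignore trivially evaluatable siblings. A hypothetical target body where it still resolves to the single distribute directive:

void kernel(int n, int *a) {
  #pragma omp target teams map(tofrom: a[0:n])
  {
    constexpr int chunk = 1; // constexpr locals are skipped
    ;                        // null statements are skipped
    #pragma omp distribute
    for (int i = 0; i < n; ++i)
      a[i] += chunk;
  }
}
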
+/// Check for inner distribute directive.
+static const OMPExecutableDirective *
+getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
+
+ if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
+ switch (D.getDirectiveKind()) {
+ case OMPD_target:
+ if (isOpenMPDistributeDirective(DKind))
+ return NestedDir;
+ if (DKind == OMPD_teams) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return nullptr;
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPDistributeDirective(DKind))
+ return NND;
+ }
+ }
+ return nullptr;
+ case OMPD_target_teams:
+ if (isOpenMPDistributeDirective(DKind))
+ return NestedDir;
+ return nullptr;
+ case OMPD_target_parallel:
+ case OMPD_target_simd:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ return nullptr;
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ llvm_unreachable("Unexpected directive.");
+ }
+ }
+
+ return nullptr;
+}
+
+void CGOpenMPRuntime::emitTargetNumIterationsCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *Device,
+ const llvm::function_ref<llvm::Value *(
+ CodeGenFunction &CGF, const OMPLoopDirective &D)> &SizeEmitter) {
+ OpenMPDirectiveKind Kind = D.getDirectiveKind();
+ const OMPExecutableDirective *TD = &D;
+ // Get nested teams distribute kind directive, if any.
+ if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
+ TD = getNestedDistributeDirective(CGM.getContext(), D);
+ if (!TD)
+ return;
+ const auto *LD = cast<OMPLoopDirective>(TD);
+ auto &&CodeGen = [LD, &Device, &SizeEmitter, this](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ llvm::Value *NumIterations = SizeEmitter(CGF, *LD);
+
+ // Emit device ID if any.
+ llvm::Value *DeviceID;
+ if (Device)
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGF.Int64Ty, /*isSigned=*/true);
+ else
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+
+ llvm::Value *Args[] = {DeviceID, NumIterations};
+ CGF.EmitRuntimeCall(
+ createRuntimeFunction(OMPRTL__kmpc_push_target_tripcount), Args);
+ };
+ emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
+}
+
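
emitTargetNumIterationsCall pushes the loop trip count to the offload runtime via __kmpc_push_target_tripcount before the target region is launched, either for the directive itself or for a nested teams distribute directive. A hypothetical combined construct whose trip count (n) would be pushed this way:

void saxpy(int n, float a, float *x, float *y) {
  // The distribute loop's iteration count is computable on the host and is
  // handed to the runtime ahead of the kernel launch.
  #pragma omp target teams distribute parallel for map(to: x[0:n]) \
      map(tofrom: y[0:n])
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}
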
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
llvm::Value *OutlinedFn,
@@ -7790,7 +8374,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
CapturedVars.clear();
CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
}
- emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
CGF.EmitBranch(OffloadContBlock);
CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
@@ -7804,7 +8388,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
CapturedVars.clear();
CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
}
- emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars);
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
@@ -7818,6 +8402,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// Get mappable expression information.
MappableExprsHandler MEHandler(D, CGF);
+ llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
auto RI = CS.getCapturedRecordDecl()->field_begin();
auto CV = CapturedVars.begin();
@@ -7847,6 +8432,12 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
if (CurBasePointers.empty())
MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
CurPointers, CurSizes, CurMapTypes);
+ // Generate correct mapping for variables captured by reference in
+ // lambdas.
+ if (CI->capturesVariable())
+ MEHandler.generateInfoForLambdaCaptures(
+ CI->getCapturedVar(), *CV, CurBasePointers, CurPointers, CurSizes,
+ CurMapTypes, LambdaPointers);
}
// We expect to have at least an element of information for this capture.
assert(!CurBasePointers.empty() &&
@@ -7868,6 +8459,9 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
Sizes.append(CurSizes.begin(), CurSizes.end());
MapTypes.append(CurMapTypes.begin(), CurMapTypes.end());
}
+ // Adjust MEMBER_OF flags for the lambda captures.
+ MEHandler.adjustMemberOfForLambdaCaptures(LambdaPointers, BasePointers,
+ Pointers, MapTypes);
// Map other list items in the map clause which are not captured variables
// but "declare target link" global variables.
MEHandler.generateInfoForDeclareTargetLink(BasePointers, Pointers, Sizes,
@@ -7935,7 +8529,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
unsigned DeviceID;
unsigned FileID;
unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID,
+ getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
FileID, Line);
// Is this a target region that should not be emitted as an entry point? If
@@ -8030,6 +8624,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_declare_reduction:
case OMPD_taskloop:
case OMPD_taskloop_simd:
+ case OMPD_requires:
case OMPD_unknown:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
}
@@ -8055,19 +8650,20 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
}
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
- const auto *FD = cast<FunctionDecl>(GD.getDecl());
-
// If emitting code for the host, we do not process FD here. Instead we do
// the normal code generation.
if (!CGM.getLangOpts().OpenMPIsDevice)
return false;
+ const ValueDecl *VD = cast<ValueDecl>(GD.getDecl());
+ StringRef Name = CGM.getMangledName(GD);
// Try to detect target regions in the function.
- scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
+ if (const auto *FD = dyn_cast<FunctionDecl>(VD))
+ scanForTargetRegionsFunctions(FD->getBody(), Name);
// Do not emit function if it is not marked as declare target.
- return !isDeclareTargetDeclaration(FD) &&
- AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
+ return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
+ AlreadyEmittedTargetFunctions.count(Name) == 0;
}
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
@@ -8093,64 +8689,105 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
// Do not emit variable if it is not marked as declare target.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(cast<VarDecl>(GD.getDecl()));
- return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
+ cast<VarDecl>(GD.getDecl()));
+ if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
+ return true;
+ }
+ return false;
}
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
- if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD)) {
- OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
- StringRef VarName;
- CharUnits VarSize;
- llvm::GlobalValue::LinkageTypes Linkage;
- switch (*Res) {
- case OMPDeclareTargetDeclAttr::MT_To:
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
- VarName = CGM.getMangledName(VD);
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!Res) {
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ // Register non-target variables being emitted in device code (debug info
+ // may cause this).
+ StringRef VarName = CGM.getMangledName(VD);
+ EmittedNonTargetVariables.try_emplace(VarName, Addr);
+ }
+ return;
+ }
+ // Register declare target variables.
+ OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
+ StringRef VarName;
+ CharUnits VarSize;
+ llvm::GlobalValue::LinkageTypes Linkage;
+ switch (*Res) {
+ case OMPDeclareTargetDeclAttr::MT_To:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
+ VarName = CGM.getMangledName(VD);
+ if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
- Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
- // Temp solution to prevent optimizations of the internal variables.
- if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
- std::string RefName = getName({VarName, "ref"});
- if (!CGM.GetGlobalValue(RefName)) {
- llvm::Constant *AddrRef =
- getOrCreateInternalVariable(Addr->getType(), RefName);
- auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
- GVAddrRef->setConstant(/*Val=*/true);
- GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
- GVAddrRef->setInitializer(Addr);
- CGM.addCompilerUsedGlobal(GVAddrRef);
- }
- }
- break;
- case OMPDeclareTargetDeclAttr::MT_Link:
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- VarName = Addr->getName();
- Addr = nullptr;
- } else {
- VarName = getAddrOfDeclareTargetLink(VD).getName();
- Addr =
- cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ assert(!VarSize.isZero() && "Expected non-zero size of the variable");
+ } else {
+ VarSize = CharUnits::Zero();
+ }
+ Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ // Temp solution to prevent optimizations of the internal variables.
+ if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
+ std::string RefName = getName({VarName, "ref"});
+ if (!CGM.GetGlobalValue(RefName)) {
+ llvm::Constant *AddrRef =
+ getOrCreateInternalVariable(Addr->getType(), RefName);
+ auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
+ GVAddrRef->setConstant(/*Val=*/true);
+ GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GVAddrRef->setInitializer(Addr);
+ CGM.addCompilerUsedGlobal(GVAddrRef);
}
- VarSize = CGM.getPointerSize();
- Linkage = llvm::GlobalValue::WeakAnyLinkage;
- break;
}
- OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
- VarName, Addr, VarSize, Flags, Linkage);
+ break;
+ case OMPDeclareTargetDeclAttr::MT_Link:
+ Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
+ if (CGM.getLangOpts().OpenMPIsDevice) {
+ VarName = Addr->getName();
+ Addr = nullptr;
+ } else {
+ VarName = getAddrOfDeclareTargetLink(VD).getName();
+ Addr = cast<llvm::Constant>(getAddrOfDeclareTargetLink(VD).getPointer());
+ }
+ VarSize = CGM.getPointerSize();
+ Linkage = llvm::GlobalValue::WeakAnyLinkage;
+ break;
}
+ OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
+ VarName, Addr, VarSize, Flags, Linkage);
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
- if (isa<FunctionDecl>(GD.getDecl()))
+ if (isa<FunctionDecl>(GD.getDecl()) ||
+ isa<OMPDeclareReductionDecl>(GD.getDecl()))
return emitTargetFunctions(GD);
return emitTargetGlobalVariable(GD);
}
+void CGOpenMPRuntime::emitDeferredTargetDecls() const {
+ for (const VarDecl *VD : DeferredGlobalVariables) {
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!Res)
+ continue;
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
+ CGM.EmitGlobal(VD);
+ } else {
+ assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
+             "Expected 'to' or 'link' clauses.");
+ (void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ }
+ }
+}
+
+void CGOpenMPRuntime::adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
+ assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
+         "Expected a target-based directive.");
+}
+
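As a rough, user-level illustration of what adjustTargetSpecificDataForLambdas is for (the example is not from this patch and assumes a toolchain with OpenMP lambda-capture support): a lambda used inside a target region carries pointers for its by-reference captures, and those are the addresses the target-specific override has to redirect to the mapped device copies.

void lambda_capture_sketch(int *a, int n) {
  int scale = 2;
  // '&scale' is a by-reference capture; 'a' is captured by value.
  auto body = [&scale, a](int i) { a[i] *= scale; };
  #pragma omp target parallel for map(tofrom: a[0:n]) map(to: scale)
  for (int i = 0; i < n; ++i)
    body(i); // the lambda's stored pointer to 'scale' must target the mapped copy
}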
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
@@ -8169,21 +8806,20 @@ bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
return true;
+ StringRef Name = CGM.getMangledName(GD);
const auto *D = cast<FunctionDecl>(GD.getDecl());
- const FunctionDecl *FD = D->getCanonicalDecl();
// Do not emit the function if it is marked as declare target; it was already
// emitted.
- if (isDeclareTargetDeclaration(D)) {
- if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
- if (auto *F = dyn_cast_or_null<llvm::Function>(
- CGM.GetGlobalValue(CGM.getMangledName(GD))))
+ if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
+ if (D->hasBody() && AlreadyEmittedTargetFunctions.count(Name) == 0) {
+ if (auto *F = dyn_cast_or_null<llvm::Function>(CGM.GetGlobalValue(Name)))
return !F->isDeclaration();
return false;
}
return true;
}
- return !AlreadyEmittedTargetFunctions.insert(FD).second;
+ return !AlreadyEmittedTargetFunctions.insert(Name).second;
}
llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
@@ -8478,6 +9114,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_requires:
case OMPD_unknown:
llvm_unreachable("Unexpected standalone target data directive.");
break;
@@ -8730,8 +9367,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
ParamAttr.Kind = Linear;
if (*SI) {
- if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C,
- Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
if (const auto *DRE =
cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
@@ -8740,6 +9377,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
ParamPositions[StridePVD->getCanonicalDecl()]);
}
}
+ } else {
+ ParamAttr.StrideOrArg = Result.Val.getInt();
}
}
++SI;
@@ -8782,7 +9421,8 @@ public:
} // namespace
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
- const OMPLoopDirective &D) {
+ const OMPLoopDirective &D,
+ ArrayRef<Expr *> NumIterations) {
if (!CGF.HaveInsertPoint())
return;
@@ -8805,37 +9445,50 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
} else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
+ llvm::APInt Size(/*numBits=*/32, NumIterations.size());
+ QualType ArrayTy =
+ C.getConstantArrayType(KmpDimTy, Size, ArrayType::Normal, 0);
- Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
- CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
+ Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
+ CGF.EmitNullInitialization(DimsAddr, ArrayTy);
enum { LowerFD = 0, UpperFD, StrideFD };
// Fill dims with data.
- LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
- // dims.upper = num_iterations;
- LValue UpperLVal =
- CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
- llvm::Value *NumIterVal = CGF.EmitScalarConversion(
- CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
- Int64Ty, D.getNumIterations()->getExprLoc());
- CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
- // dims.stride = 1;
- LValue StrideLVal =
- CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
- CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
- StrideLVal);
+ for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
+ LValue DimsLVal =
+ CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
+ DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
+ KmpDimTy);
+ // dims.upper = num_iterations;
+ LValue UpperLVal = CGF.EmitLValueForField(
+ DimsLVal, *std::next(RD->field_begin(), UpperFD));
+ llvm::Value *NumIterVal =
+ CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
+ D.getNumIterations()->getType(), Int64Ty,
+ D.getNumIterations()->getExprLoc());
+ CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
+ // dims.stride = 1;
+ LValue StrideLVal = CGF.EmitLValueForField(
+ DimsLVal, *std::next(RD->field_begin(), StrideFD));
+ CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
+ StrideLVal);
+ }
// Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
// kmp_int32 num_dims, struct kmp_dim * dims);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
- getThreadID(CGF, D.getLocStart()),
- llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DimsAddr.getPointer(), CGM.VoidPtrTy)};
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, D.getBeginLoc()),
+ getThreadID(CGF, D.getBeginLoc()),
+ llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.Builder
+ .CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
+ .getPointer(),
+ CGM.VoidPtrTy)};
llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
- emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
+ emitUpdateLocation(CGF, D.getEndLoc()), getThreadID(CGF, D.getEndLoc())};
llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
llvm::makeArrayRef(FiniArgs));
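The NumIterations array carries one trip count per loop of the doacross nest, which is why the dims temporary is now an array of kmp_dim entries instead of a single record. For context, a hedged user-level example (not from this patch) of the two-dimensional doacross nest this lowering supports:

#include <cstdio>

int main() {
  const int N = 8, M = 8;
  static int A[N + 1][M + 1] = {};
  #pragma omp parallel for ordered(2)
  for (int i = 1; i <= N; ++i)
    for (int j = 1; j <= M; ++j) {
      #pragma omp ordered depend(sink: i - 1, j) depend(sink: i, j - 1)
      A[i][j] = A[i - 1][j] + A[i][j - 1] + 1; // wavefront dependence
      #pragma omp ordered depend(source)
    }
  std::printf("A[N][M] = %d\n", A[N][M]);
  return 0;
}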
@@ -8845,16 +9498,29 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) {
QualType Int64Ty =
CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- const Expr *CounterVal = C->getCounterValue();
- assert(CounterVal);
- llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
- CounterVal->getType(), Int64Ty,
- CounterVal->getExprLoc());
- Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
- CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
- getThreadID(CGF, C->getLocStart()),
- CntAddr.getPointer()};
+ llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
+ QualType ArrayTy = CGM.getContext().getConstantArrayType(
+ Int64Ty, Size, ArrayType::Normal, 0);
+ Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
+ const Expr *CounterVal = C->getLoopData(I);
+ assert(CounterVal);
+ llvm::Value *CntVal = CGF.EmitScalarConversion(
+ CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
+ CounterVal->getExprLoc());
+ CGF.EmitStoreOfScalar(
+ CntVal,
+ CGF.Builder.CreateConstArrayGEP(
+ CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
+ /*Volatile=*/false, Int64Ty);
+ }
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, C->getBeginLoc()),
+ getThreadID(CGF, C->getBeginLoc()),
+ CGF.Builder
+ .CreateConstArrayGEP(CntAddr, 0,
+ CGM.getContext().getTypeSizeInChars(Int64Ty))
+ .getPointer()};
llvm::Value *RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
@@ -9169,7 +9835,8 @@ void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
}
void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
- const OMPLoopDirective &D) {
+ const OMPLoopDirective &D,
+ ArrayRef<Expr *> NumIterations) {
llvm_unreachable("Not supported in SIMD-only mode");
}
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 01ff0c20fd66..1822a6fd1974 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -15,12 +15,13 @@
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
+#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
@@ -278,12 +279,39 @@ protected:
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
+ void setLocThreadIdInsertPt(CodeGenFunction &CGF,
+ bool AtCurrentPoint = false);
+ void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
+
+ /// Check if the default location must be constant.
+ /// Default is false to support OMPT/OMPD.
+ virtual bool isDefaultLocationConstant() const { return false; }
+
+ /// Returns additional flags that can be stored in reserved_2 field of the
+ /// default location.
+ virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
+
+ /// Returns default flags for the barriers depending on the directive, for
+ /// which this barier is going to be emitted.
+ static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
+
+ /// Get the LLVM type for the critical name.
+  llvm::ArrayType *getKmpCriticalNameTy() const { return KmpCriticalNameTy; }
+
+ /// Returns corresponding lock object for the specified critical region
+ /// name. If the lock object does not exist it is created, otherwise the
+ /// reference to the existing copy is returned.
+ /// \param CriticalName Name of the critical region.
+ ///
+ llvm::Value *getCriticalRegionLock(StringRef CriticalName);
+
private:
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
+ using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
- typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
+ using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
@@ -300,6 +328,8 @@ private:
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
+ /// Insert point for the service instructions.
+ llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
@@ -315,10 +345,6 @@ private:
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
- IdentifierInfo *In = nullptr;
- IdentifierInfo *Out = nullptr;
- IdentifierInfo *Priv = nullptr;
- IdentifierInfo *Orig = nullptr;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
@@ -600,7 +626,15 @@ private:
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
- llvm::SmallDenseSet<const FunctionDecl *> AlreadyEmittedTargetFunctions;
+  /// Set of mangled names of the already emitted target functions.
+ llvm::StringSet<> AlreadyEmittedTargetFunctions;
+ /// List of the global variables with their addresses that should not be
+ /// emitted for the target.
+ llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
+
+ /// List of variables that can become declare target implicitly and, thus,
+ /// must be emitted.
+ llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
/// Creates and registers offloading binary descriptor for the current
/// compilation unit. The function that does the registration is returned.
@@ -673,10 +707,10 @@ private:
const llvm::Twine &Name);
/// Set of threadprivate variables with the generated initializer.
- llvm::SmallPtrSet<const VarDecl *, 4> ThreadPrivateWithDefinition;
+ llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
- llvm::SmallPtrSet<const VarDecl *, 4> DeclareTargetWithDefinition;
+ llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
@@ -688,13 +722,6 @@ private:
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
- /// Returns corresponding lock object for the specified critical region
- /// name. If the lock object does not exist it is created, otherwise the
- /// reference to the existing copy is returned.
- /// \param CriticalName Name of the critical region.
- ///
- llvm::Value *getCriticalRegionLock(StringRef CriticalName);
-
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Value *TaskEntry = nullptr;
@@ -884,6 +911,20 @@ public:
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
+ /// Check if the specified \a ScheduleKind is static chunked.
+ /// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
+  /// Check if the specified \a ScheduleKind is static chunked.
+ /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
+ /// \param Chunked True if chunk is specified in the clause.
+ ///
+ virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
+ bool Chunked) const;
+
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
@@ -1327,6 +1368,15 @@ public:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
+ /// Emit code that pushes the trip count of loops associated with constructs
+ /// 'target teams distribute' and 'teams distribute parallel for'.
+ /// \param SizeEmitter Emits the int64 value for the number of iterations of
+ /// the associated loop.
+ virtual void emitTargetNumIterationsCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *Device,
+ const llvm::function_ref<llvm::Value *(
+ CodeGenFunction &CGF, const OMPLoopDirective &D)> &SizeEmitter);
+
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
@@ -1465,8 +1515,8 @@ public:
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
- virtual void emitDoacrossInit(CodeGenFunction &CGF,
- const OMPLoopDirective &D);
+ virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
+ ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
@@ -1490,6 +1540,18 @@ public:
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
+ /// Choose default schedule type and chunk value for the
+ /// dist_schedule clause.
+ virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
+ llvm::Value *&Chunk) const {}
+
+ /// Choose default schedule type and chunk value for the
+ /// schedule clause.
+ virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
+ const Expr *&ChunkExpr) const {}
+
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
@@ -1505,10 +1567,23 @@ public:
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
- /// Marks the declaration as alread emitted for the device code and returns
+ /// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
+ /// Emit deferred declare target variables marked for deferred emission.
+ void emitDeferredTargetDecls() const;
+
+ /// Adjust some parameters for the target-based directives, like addresses of
+ /// the variables captured by reference in lambdas.
+ virtual void
+ adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D) const;
+
+  /// Perform a check on the requires decl to ensure that the target
+  /// architecture supports unified addressing.
+ virtual void checkArchForUnifiedAddressing(CodeGenModule &CGM,
+ const OMPRequiresDecl *D) const {}
};
/// Class that supports emission of SIMD-only code.
@@ -2051,8 +2126,8 @@ public:
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
- void emitDoacrossInit(CodeGenFunction &CGF,
- const OMPLoopDirective &D) override;
+ void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
+ ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index 036b5371fe0b..7046ab3aa35c 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
@@ -32,8 +33,8 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
/// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
OMPRTL_NVPTX__kmpc_spmd_kernel_init,
- /// Call to void __kmpc_spmd_kernel_deinit();
- OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
+ /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
+ OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
/// Call to void __kmpc_kernel_prepare_parallel(void
/// *outlined_function, int16_t
/// IsOMPRuntimeInitialized);
@@ -55,37 +56,27 @@ enum OpenMPRTLFunctionNVPTX {
/// Call to int64_t __kmpc_shuffle_int64(int64_t element,
/// int16_t lane_offset, int16_t warp_size);
OMPRTL_NVPTX__kmpc_shuffle_int64,
- /// Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
+ /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
/// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
/// lane_offset, int16_t shortCircuit),
/// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
- OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
- /// Call to __kmpc_nvptx_simd_reduce_nowait(kmp_int32
- /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
- OMPRTL_NVPTX__kmpc_simd_reduce_nowait,
- /// Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
- /// int32_t num_vars, size_t reduce_size, void *reduce_data,
- /// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
- /// lane_offset, int16_t shortCircuit),
- /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
- /// void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
- /// int32_t index, int32_t width),
- /// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
- /// index, int32_t width, int32_t reduce))
- OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
+ OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2,
+ /// Call to __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
+ /// global_tid, kmp_critical_name *lck)
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple,
+ /// Call to __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc,
+ /// kmp_int32 global_tid, kmp_critical_name *lck)
+ OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple,
/// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
OMPRTL_NVPTX__kmpc_end_reduce_nowait,
/// Call to void __kmpc_data_sharing_init_stack();
OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
/// Call to void __kmpc_data_sharing_init_stack_spmd();
OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
- /// Call to void* __kmpc_data_sharing_push_stack(size_t size,
+ /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
/// int16_t UseSharedMemory);
- OMPRTL_NVPTX__kmpc_data_sharing_push_stack,
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
/// Call to void __kmpc_data_sharing_pop_stack(void *a);
OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
/// Call to void __kmpc_begin_sharing_variables(void ***args,
@@ -100,6 +91,17 @@ enum OpenMPRTLFunctionNVPTX {
OMPRTL_NVPTX__kmpc_parallel_level,
/// Call to int8_t __kmpc_is_spmd_exec_mode();
OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
+ /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
+ /// const void *buf, size_t size, int16_t is_shared, const void **res);
+ OMPRTL_NVPTX__kmpc_get_team_static_memory,
+ /// Call to void __kmpc_restore_team_static_memory(int16_t
+ /// isSPMDExecutionMode, int16_t is_shared);
+ OMPRTL_NVPTX__kmpc_restore_team_static_memory,
+ /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ OMPRTL__kmpc_barrier,
+ /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
+ /// global_tid);
+ OMPRTL__kmpc_barrier_simple_spmd,
};
/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
@@ -142,19 +144,35 @@ public:
/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry
/// to the target region and used by nested directives such as 'parallel'
/// to emit optimized code.
-class ExecutionModeRAII {
+class ExecutionRuntimeModesRAII {
private:
- CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
- CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;
+ CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
+ CGOpenMPRuntimeNVPTX::EM_Unknown;
+ CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
+ bool SavedRuntimeMode = false;
+ bool *RuntimeMode = nullptr;
public:
- ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode, bool IsSPMD)
- : Mode(Mode) {
- SavedMode = Mode;
- Mode = IsSPMD ? CGOpenMPRuntimeNVPTX::EM_SPMD
- : CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+ /// Constructor for Non-SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
+ : ExecMode(ExecMode) {
+ SavedExecMode = ExecMode;
+ ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
+ }
+ /// Constructor for SPMD mode.
+ ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
+ bool &RuntimeMode, bool FullRuntimeMode)
+ : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
+ SavedExecMode = ExecMode;
+ SavedRuntimeMode = RuntimeMode;
+ ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
+ RuntimeMode = FullRuntimeMode;
+ }
+ ~ExecutionRuntimeModesRAII() {
+ ExecMode = SavedExecMode;
+ if (RuntimeMode)
+ *RuntimeMode = SavedRuntimeMode;
}
- ~ExecutionModeRAII() { Mode = SavedMode; }
};
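The replacement RAII saves and restores both the execution mode and the full-runtime flag. A minimal sketch of the underlying save/restore pattern (illustrative names, not the class above):

struct ScopedModeSketch {
  int &Mode;
  int Saved;
  ScopedModeSketch(int &M, int NewMode) : Mode(M), Saved(M) { Mode = NewMode; }
  ~ScopedModeSketch() { Mode = Saved; } // restored even on early returns
};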
/// GPU Configuration: This information can be derived from cuda registers,
@@ -169,16 +187,113 @@ enum MachineConfiguration : unsigned {
LaneIDMask = WarpSize - 1,
/// Global memory alignment for performance.
- GlobalMemoryAlignment = 256,
-};
+ GlobalMemoryAlignment = 128,
-enum NamedBarrier : unsigned {
- /// Synchronize on this barrier #ID using a named barrier primitive.
- /// Only the subset of active threads in a parallel region arrive at the
- /// barrier.
- NB_Parallel = 1,
+ /// Maximal size of the shared memory buffer.
+ SharedMemorySize = 128,
};
+static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
+ RefExpr = RefExpr->IgnoreParens();
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
+ const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
+ const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
+ Base = TempOASE->getBase()->IgnoreParenImpCasts();
+ while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
+ Base = TempASE->getBase()->IgnoreParenImpCasts();
+ RefExpr = Base;
+ }
+ RefExpr = RefExpr->IgnoreParenImpCasts();
+ if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
+ return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
+ const auto *ME = cast<MemberExpr>(RefExpr);
+ return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+}
+
+typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
+static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
+ return P1.first > P2.first;
+}
+
+static RecordDecl *buildRecordForGlobalizedVars(
+ ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
+ ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields) {
+ if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
+ return nullptr;
+ SmallVector<VarsDataTy, 4> GlobalizedVars;
+ for (const ValueDecl *D : EscapedDecls)
+ GlobalizedVars.emplace_back(
+ CharUnits::fromQuantity(std::max(
+ C.getDeclAlign(D).getQuantity(),
+ static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
+ D);
+ for (const ValueDecl *D : EscapedDeclsForTeams)
+ GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
+ std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
+ stable_sort_comparator);
+ // Build struct _globalized_locals_ty {
+  //  /* globalized vars */[WarpSize] align (max(decl_align,
+ // GlobalMemoryAlignment))
+ // /* globalized vars */ for EscapedDeclsForTeams
+ // };
+ RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
+ GlobalizedRD->startDefinition();
+ llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
+ EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
+ for (const auto &Pair : GlobalizedVars) {
+ const ValueDecl *VD = Pair.second;
+ QualType Type = VD->getType();
+ if (Type->isLValueReferenceType())
+ Type = C.getPointerType(Type.getNonReferenceType());
+ else
+ Type = Type.getNonReferenceType();
+ SourceLocation Loc = VD->getLocation();
+ FieldDecl *Field;
+ if (SingleEscaped.count(VD)) {
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (VD->hasAttrs()) {
+ for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
+ E(VD->getAttrs().end());
+ I != E; ++I)
+ Field->addAttr(*I);
+ }
+ } else {
+ llvm::APInt ArraySize(32, WarpSize);
+ Type = C.getConstantArrayType(Type, ArraySize, ArrayType::Normal, 0);
+ Field = FieldDecl::Create(
+ C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
+ C.getTrivialTypeSourceInfo(Type, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
+ static_cast<CharUnits::QuantityType>(
+ GlobalMemoryAlignment)));
+ Field->addAttr(AlignedAttr::CreateImplicit(
+ C, AlignedAttr::GNU_aligned, /*IsAlignmentExpr=*/true,
+ IntegerLiteral::Create(C, Align,
+ C.getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation())));
+ }
+ GlobalizedRD->addDecl(Field);
+ MappedDeclsFields.try_emplace(VD, Field);
+ }
+ GlobalizedRD->completeDefinition();
+ return GlobalizedRD;
+}
+
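Roughly, the record built above has the following shape for one variable escaping a parallel region and one escaping only a teams region (the field names, the 32-lane warp size and the 128-byte alignment are assumptions for the sketch):

struct _globalized_locals_ty_sketch {
  alignas(128) int parallel_escaped[32]; // one slot per lane, over-aligned
  double teams_escaped;                  // single slot for a teams-only escape
};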
/// Get the list of variables that can escape their declaration context.
class CheckVarsEscapingDeclContext final
: public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
@@ -191,20 +306,10 @@ class CheckVarsEscapingDeclContext final
bool AllEscaped = false;
bool IsForCombinedParallelRegion = false;
- static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
- isDeclareTargetDeclaration(const ValueDecl *VD) {
- for (const Decl *D : VD->redecls()) {
- if (!D->hasAttrs())
- continue;
- if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getMapType();
- }
- return llvm::None;
- }
-
void markAsEscaped(const ValueDecl *VD) {
// Do not globalize declare target variables.
- if (!isa<VarDecl>(VD) || isDeclareTargetDeclaration(VD))
+ if (!isa<VarDecl>(VD) ||
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
// Variables captured by value must be globalized.
@@ -218,9 +323,11 @@ class CheckVarsEscapingDeclContext final
const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
if (!Attr)
return;
- if (!isOpenMPPrivate(
- static_cast<OpenMPClauseKind>(Attr->getCaptureKind())) ||
- Attr->getCaptureKind() == OMPC_map)
+ if (((Attr->getCaptureKind() != OMPC_map) &&
+ !isOpenMPPrivate(
+ static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
+ ((Attr->getCaptureKind() == OMPC_map) &&
+ !FD->getType()->isAnyPointerType()))
return;
}
if (!FD->getType()->isReferenceType()) {
@@ -302,55 +409,24 @@ class CheckVarsEscapingDeclContext final
}
}
- typedef std::pair<CharUnits /*Align*/, const ValueDecl *> VarsDataTy;
- static bool stable_sort_comparator(const VarsDataTy P1, const VarsDataTy P2) {
- return P1.first > P2.first;
- }
-
- void buildRecordForGlobalizedVars() {
+ void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
assert(!GlobalizedRD &&
"Record for globalized variables is built already.");
- if (EscapedDecls.empty())
- return;
- ASTContext &C = CGF.getContext();
- SmallVector<VarsDataTy, 4> GlobalizedVars;
- for (const ValueDecl *D : EscapedDecls)
- GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
- std::stable_sort(GlobalizedVars.begin(), GlobalizedVars.end(),
- stable_sort_comparator);
- // Build struct _globalized_locals_ty {
- // /* globalized vars */
- // };
- GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
- GlobalizedRD->startDefinition();
- for (const auto &Pair : GlobalizedVars) {
- const ValueDecl *VD = Pair.second;
- QualType Type = VD->getType();
- if (Type->isLValueReferenceType())
- Type = C.getPointerType(Type.getNonReferenceType());
- else
- Type = Type.getNonReferenceType();
- SourceLocation Loc = VD->getLocation();
- auto *Field = FieldDecl::Create(
- C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
- C.getTrivialTypeSourceInfo(Type, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- GlobalizedRD->addDecl(Field);
- if (VD->hasAttrs()) {
- for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
- E(VD->getAttrs().end());
- I != E; ++I)
- Field->addAttr(*I);
- }
- MappedDeclsFields.try_emplace(VD, Field);
- }
- GlobalizedRD->completeDefinition();
+ ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
+ if (IsInTTDRegion)
+ EscapedDeclsForTeams = EscapedDecls.getArrayRef();
+ else
+ EscapedDeclsForParallel = EscapedDecls.getArrayRef();
+ GlobalizedRD = ::buildRecordForGlobalizedVars(
+ CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
+ MappedDeclsFields);
}
public:
- CheckVarsEscapingDeclContext(CodeGenFunction &CGF) : CGF(CGF) {}
+ CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
+ ArrayRef<const ValueDecl *> TeamsReductions)
+ : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
+ }
virtual ~CheckVarsEscapingDeclContext() = default;
void VisitDeclStmt(const DeclStmt *S) {
if (!S)
@@ -492,9 +568,9 @@ public:
/// Returns the record that handles all the escaped local variables and is used
/// instead of their original storage.
- const RecordDecl *getGlobalizedRecord() {
+ const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
if (!GlobalizedRD)
- buildRecordForGlobalizedVars();
+ buildRecordForGlobalizedVars(IsInTTDRegion);
return GlobalizedRD;
}
@@ -568,31 +644,6 @@ static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
"nvptx_num_threads");
}
-/// Get barrier to synchronize all threads in a block.
-static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
- CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
-}
-
-/// Get barrier #ID to synchronize selected (multiple of warp size) threads in
-/// a CTA.
-static void getNVPTXBarrier(CodeGenFunction &CGF, int ID,
- llvm::Value *NumThreads) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads};
- CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier),
- Args);
-}
-
-/// Synchronize all GPU threads in a block.
-static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
-
-/// Synchronize worker threads in a parallel region.
-static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
- return getNVPTXBarrier(CGF, NB_Parallel, NumThreads);
-}
-
/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
@@ -654,12 +705,58 @@ getDataSharingMode(CodeGenModule &CGM) {
: CGOpenMPRuntimeNVPTX::Generic;
}
+/// Checks if the expression is constant or does not have non-trivial function
+/// calls.
+static bool isTrivial(ASTContext &Ctx, const Expr *E) {
+ // We can skip constant expressions.
+ // We can skip expressions with trivial calls or simple expressions.
+ return (E->isEvaluatable(Ctx, Expr::SE_AllowUndefinedBehavior) ||
+ !E->hasNonTrivialCall(Ctx)) &&
+ !E->HasSideEffects(Ctx, /*IncludePossibleEffects=*/true);
+}
+
/// Checks if the \p Body is the \a CompoundStmt and returns its child statement
-/// iff there is only one.
-static const Stmt *getSingleCompoundChild(const Stmt *Body) {
- if (const auto *C = dyn_cast<CompoundStmt>(Body))
- if (C->size() == 1)
- return C->body_front();
+/// iff there is only one that is not evaluatable at compile time.
+static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body) {
+ if (const auto *C = dyn_cast<CompoundStmt>(Body)) {
+ const Stmt *Child = nullptr;
+ for (const Stmt *S : C->body()) {
+ if (const auto *E = dyn_cast<Expr>(S)) {
+ if (isTrivial(Ctx, E))
+ continue;
+ }
+ // Some of the statements can be ignored.
+ if (isa<AsmStmt>(S) || isa<NullStmt>(S) || isa<OMPFlushDirective>(S) ||
+ isa<OMPBarrierDirective>(S) || isa<OMPTaskyieldDirective>(S))
+ continue;
+ // Analyze declarations.
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
+ if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
+ isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
+ isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
+ isa<UsingDirectiveDecl>(D) ||
+ isa<OMPDeclareReductionDecl>(D) ||
+ isa<OMPThreadPrivateDecl>(D))
+ return true;
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ return VD->isConstexpr() ||
+ ((VD->getType().isTrivialType(Ctx) ||
+ VD->getType()->isReferenceType()) &&
+ (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
+ }))
+ continue;
+ }
+      // Found multiple children; cannot return a single child.
+ if (Child)
+ return Body;
+ Child = S;
+ }
+ if (Child)
+ return Child;
+ }
return Body;
}
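A hedged example (not from this patch) of what the relaxed check accepts: the trivial declaration and the null statement below are skipped, so the nested parallel directive is still treated as the single meaningful child of the target region body.

void single_child_sketch(int *a, int n) {
  #pragma omp target map(tofrom: a[0:n])
  {
    constexpr int step = 1; // trivial declaration, ignored by the analysis
    ;                       // null statement, ignored as well
    #pragma omp parallel for
    for (int i = 0; i < n; i += step)
      a[i] += step;
  }
}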
@@ -686,8 +783,9 @@ static bool hasParallelIfNumThreadsClause(ASTContext &Ctx,
static bool hasNestedSPMDDirective(ASTContext &Ctx,
const OMPExecutableDirective &D) {
const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body = CS->getCapturedStmt()->IgnoreContainers();
- const Stmt *ChildStmt = getSingleCompoundChild(Body);
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
@@ -696,27 +794,215 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
if (isOpenMPParallelDirective(DKind) &&
!hasParallelIfNumThreadsClause(Ctx, *NestedDir))
return true;
- if (DKind == OMPD_teams || DKind == OMPD_teams_distribute) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ if (DKind == OMPD_teams) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
if (isOpenMPParallelDirective(DKind) &&
!hasParallelIfNumThreadsClause(Ctx, *NND))
return true;
- if (DKind == OMPD_distribute) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ }
+ }
+ return false;
+ case OMPD_target_teams:
+ return isOpenMPParallelDirective(DKind) &&
+ !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
+ case OMPD_target_simd:
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ llvm_unreachable("Unexpected directive.");
+ }
+ }
+
+ return false;
+}
+
+static bool supportsSPMDExecutionMode(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ switch (DirectiveKind) {
+ case OMPD_target:
+ case OMPD_target_teams:
+ return hasNestedSPMDDirective(Ctx, D);
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ return !hasParallelIfNumThreadsClause(Ctx, D);
+ case OMPD_target_simd:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ return false;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ break;
+ }
+ llvm_unreachable(
+ "Unknown programming model for OpenMP directive on NVPTX target.");
+}
+
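For orientation, a hedged set of target regions (not from this patch): under the checks above the first two are compiled in SPMD mode, while the last stays in the generic (non-SPMD) scheme because no parallel region is nested in it.

void spmd_mode_sketch(float *a, int n) {
  #pragma omp target teams distribute parallel for map(tofrom: a[0:n])
  for (int i = 0; i < n; ++i) a[i] += 1.0f;           // SPMD

  #pragma omp target parallel for map(tofrom: a[0:n])
  for (int i = 0; i < n; ++i) a[i] *= 2.0f;           // SPMD

  #pragma omp target teams distribute map(tofrom: a[0:n])
  for (int i = 0; i < n; ++i) a[i] -= 1.0f;           // generic mode
}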
+/// Check if the directive is loop-based and either has no schedule clause at
+/// all or has static scheduling.
+static bool hasStaticScheduling(const OMPExecutableDirective &D) {
+ assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
+ isOpenMPLoopDirective(D.getDirectiveKind()) &&
+ "Expected loop-based directive.");
+ return !D.hasClausesOfKind<OMPOrderedClause>() &&
+ (!D.hasClausesOfKind<OMPScheduleClause>() ||
+ llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
+ [](const OMPScheduleClause *C) {
+ return C->getScheduleKind() == OMPC_SCHEDULE_static;
+ }));
+}
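As a hedged illustration of this check (examples not from this patch): the first loop qualifies because its schedule is static and there is no ordered clause, so the lighter runtime can be used; the second keeps the full runtime because of its dynamic schedule.

void scheduling_sketch(int *a, int n) {
  #pragma omp target teams distribute parallel for map(tofrom: a[0:n]) schedule(static)
  for (int i = 0; i < n; ++i) a[i] = i;       // static scheduling

  #pragma omp target teams distribute parallel for map(tofrom: a[0:n]) schedule(dynamic)
  for (int i = 0; i < n; ++i) a[i] = 2 * i;   // dynamic scheduling
}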
+
+/// Check for an inner (nested) lightweight runtime construct, if any.
+static bool hasNestedLightweightDirective(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
+ const auto *CS = D.getInnermostCapturedStmt();
+ const auto *Body =
+ CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
+ const Stmt *ChildStmt = getSingleCompoundChild(Ctx, Body);
+
+ if (const auto *NestedDir = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
+ switch (D.getDirectiveKind()) {
+ case OMPD_target:
+ if (isOpenMPParallelDirective(DKind) &&
+ isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
+ hasStaticScheduling(*NestedDir))
+ return true;
+ if (DKind == OMPD_parallel) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ }
+ } else if (DKind == OMPD_teams) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
+ if (!Body)
+ return false;
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
+ if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
+ DKind = NND->getDirectiveKind();
+ if (isOpenMPParallelDirective(DKind) &&
+ isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
+ if (DKind == OMPD_parallel) {
+ Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
- if (!ChildStmt)
- return false;
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
- return isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NND);
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
}
}
}
@@ -724,25 +1010,28 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
return false;
case OMPD_target_teams:
if (isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NestedDir))
+ isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
+ hasStaticScheduling(*NestedDir))
return true;
- if (DKind == OMPD_distribute) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers();
+ if (DKind == OMPD_parallel) {
+ Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true);
if (!Body)
return false;
- ChildStmt = getSingleCompoundChild(Body);
+ ChildStmt = getSingleCompoundChild(Ctx, Body);
if (const auto *NND = dyn_cast<OMPExecutableDirective>(ChildStmt)) {
DKind = NND->getDirectiveKind();
- return isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NND);
+ if (isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
+ return true;
}
}
return false;
+ case OMPD_target_parallel:
+ return isOpenMPWorksharingDirective(DKind) &&
+ isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
case OMPD_target_teams_distribute:
- return isOpenMPParallelDirective(DKind) &&
- !hasParallelIfNumThreadsClause(Ctx, *NestedDir);
case OMPD_target_simd:
- case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_simd:
@@ -790,6 +1079,7 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
case OMPD_declare_reduction:
case OMPD_taskloop:
case OMPD_taskloop_simd:
+ case OMPD_requires:
case OMPD_unknown:
llvm_unreachable("Unexpected directive.");
}
@@ -798,21 +1088,26 @@ static bool hasNestedSPMDDirective(ASTContext &Ctx,
return false;
}
-static bool supportsSPMDExecutionMode(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
+/// Checks if the construct supports the lightweight runtime. It must be an SPMD
+/// construct with an inner loop-based construct that uses static scheduling.
+static bool supportsLightweightRuntime(ASTContext &Ctx,
+ const OMPExecutableDirective &D) {
+ if (!supportsSPMDExecutionMode(Ctx, D))
+ return false;
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
switch (DirectiveKind) {
case OMPD_target:
case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- return hasNestedSPMDDirective(Ctx, D);
case OMPD_target_parallel:
+ return hasNestedLightweightDirective(Ctx, D);
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
- return !hasParallelIfNumThreadsClause(Ctx, D);
+ // (Last|First)-privates must be shared in parallel region.
+ return hasStaticScheduling(D);
case OMPD_target_simd:
+ case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
return false;
case OMPD_parallel:
@@ -857,6 +1152,7 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_declare_reduction:
case OMPD_taskloop:
case OMPD_taskloop_simd:
+ case OMPD_requires:
case OMPD_unknown:
break;
}
@@ -870,9 +1166,9 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/false);
+ ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
EntryFunctionState EST;
- WorkerFunctionState WST(CGM, D.getLocStart());
+ WorkerFunctionState WST(CGM, D.getBeginLoc());
Work.clear();
WrapperFunctionsMap.clear();
@@ -886,17 +1182,35 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
: EST(EST), WST(WST) {}
void Enter(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitNonSPMDEntryHeader(CGF, EST, WST);
+ auto &RT =
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
+ RT.emitNonSPMDEntryHeader(CGF, EST, WST);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitNonSPMDEntryFooter(CGF, EST);
+ auto &RT =
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
+ RT.clearLocThreadIdInsertPt(CGF);
+ RT.emitNonSPMDEntryFooter(CGF, EST);
}
} Action(EST, WST);
CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+  // Reserve space for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
// Now change the name of the worker function to correspond to this target
// region's entry function.
@@ -984,7 +1298,10 @@ void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionModeRAII ModeRAII(CurrentExecutionMode, /*IsSPMD=*/true);
+ ExecutionRuntimeModesRAII ModeRAII(
+ CurrentExecutionMode, RequiresFullRuntime,
+ CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
+ !supportsLightweightRuntime(CGM.getContext(), D));
EntryFunctionState EST;
// Emit target region as a standalone region.
@@ -1000,14 +1317,30 @@ void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
: RT(RT), EST(EST), D(D) {}
void Enter(CodeGenFunction &CGF) override {
RT.emitSPMDEntryHeader(CGF, EST, D);
+ // Skip target region initialization.
+ RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
+ RT.clearLocThreadIdInsertPt(CGF);
RT.emitSPMDEntryFooter(CGF, EST);
}
} Action(*this, EST, D);
CodeGen.setAction(Action);
+ IsInTTDRegion = true;
+  // Reserve space for the globalized memory.
+ GlobalizedRecords.emplace_back();
+ if (!KernelStaticGlobalized) {
+ KernelStaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
+ "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
+ }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
+ IsInTTDRegion = false;
}
void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
@@ -1019,19 +1352,18 @@ void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
EST.ExitBB = CGF.createBasicBlock(".exit");
- // Initialize the OMP state in the runtime; called by all active threads.
- // TODO: Set RequiresOMPRuntime and RequiresDataSharing parameters
- // based on code analysis of the target region.
llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
- /*RequiresOMPRuntime=*/Bld.getInt16(1),
- /*RequiresDataSharing=*/Bld.getInt16(1)};
+ /*RequiresOMPRuntime=*/
+ Bld.getInt16(RequiresFullRuntime ? 1 : 0),
+ /*RequiresDataSharing=*/Bld.getInt16(0)};
CGF.EmitRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
- // For data sharing, we need to initialize the stack.
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(
- OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
+ if (RequiresFullRuntime) {
+ // For data sharing, we need to initialize the stack.
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
+ }
CGF.EmitBranch(ExecuteBB);
@@ -1054,8 +1386,11 @@ void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
CGF.EmitBlock(OMPDeInitBB);
// DeInitialize the OMP state in the runtime; called by all active threads.
+ llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
+ CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_deinit), None);
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
@@ -1142,6 +1477,8 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// Signal start of parallel region.
CGF.EmitBlock(ExecuteBB);
+ // Skip initialization.
+ setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
// Process work items: outlined parallel functions.
for (llvm::Function *W : Work) {
@@ -1202,6 +1539,8 @@ void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
// Exit target region.
CGF.EmitBlock(ExitBB);
+ // Skip initialization.
+ clearLocThreadIdInsertPt(CGF);
}
/// Returns specified OpenMP runtime function for the current OpenMP
@@ -1238,11 +1577,12 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
break;
}
- case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
- // Build void __kmpc_spmd_kernel_deinit();
+ case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
+ // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
+ llvm::Type *TypeParams[] = {CGM.Int16Ty};
auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
break;
}
case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
@@ -1307,12 +1647,12 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
break;
}
- case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait: {
- // Build int32_t kmpc_nvptx_parallel_reduce_nowait(kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t Algorithm Version),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
+ case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2: {
+ // Build int32_t kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
+ // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
+ // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
+ // lane_id, int16_t lane_offset, int16_t Algorithm Version), void
+ // (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
CGM.Int16Ty, CGM.Int16Ty};
auto *ShuffleReduceFnTy =
@@ -1322,7 +1662,8 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
auto *InterWarpCopyFnTy =
llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
/*isVarArg=*/false);
- llvm::Type *TypeParams[] = {CGM.Int32Ty,
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
+ CGM.Int32Ty,
CGM.Int32Ty,
CGM.SizeTy,
CGM.VoidPtrTy,
@@ -1331,86 +1672,40 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
break;
}
- case OMPRTL_NVPTX__kmpc_simd_reduce_nowait: {
- // Build int32_t kmpc_nvptx_simd_reduce_nowait(kmp_int32 global_tid,
- // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t Algorithm Version),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int warp_num));
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.SizeTy,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo()};
+ case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
+    // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {CGM.Int32Ty};
auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_simd_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
break;
}
- case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
- // Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
- // int32_t num_vars, size_t reduce_size, void *reduce_data,
- // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
- // lane_offset, int16_t shortCircuit),
- // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
- // void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
- // int32_t index, int32_t width),
- // void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad,
- // int32_t index, int32_t width, int32_t reduce))
- llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
- CGM.Int16Ty, CGM.Int16Ty};
- auto *ShuffleReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
- auto *InterWarpCopyFnTy =
- llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
- /*isVarArg=*/false);
- llvm::Type *CopyToScratchpadTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy,
- CGM.Int32Ty, CGM.Int32Ty};
- auto *CopyToScratchpadFnTy =
- llvm::FunctionType::get(CGM.VoidTy, CopyToScratchpadTypeParams,
- /*isVarArg=*/false);
- llvm::Type *LoadReduceTypeParams[] = {
- CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty};
- auto *LoadReduceFnTy =
- llvm::FunctionType::get(CGM.VoidTy, LoadReduceTypeParams,
- /*isVarArg=*/false);
- llvm::Type *TypeParams[] = {CGM.Int32Ty,
- CGM.Int32Ty,
- CGM.SizeTy,
- CGM.VoidPtrTy,
- ShuffleReduceFnTy->getPointerTo(),
- InterWarpCopyFnTy->getPointerTo(),
- CopyToScratchpadFnTy->getPointerTo(),
- LoadReduceFnTy->getPointerTo()};
+ case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple: {
+ // Build __kmpc_nvptx_teams_reduce_nowait_simple(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *lck)
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_simple");
break;
}
- case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
- // Build __kmpc_end_reduce_nowait(kmp_int32 global_tid);
- llvm::Type *TypeParams[] = {CGM.Int32Ty};
+ case OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple: {
+ // Build __kmpc_nvptx_teams_end_reduce_nowait_simple(ident_t *loc, kmp_int32
+ // global_tid, kmp_critical_name *lck)
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(getKmpCriticalNameTy())};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
+ FnTy, /*Name=*/"__kmpc_nvptx_teams_end_reduce_nowait_simple");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
@@ -1424,17 +1719,18 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
/// Build void __kmpc_data_sharing_init_stack_spmd();
auto *FnTy =
llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
- RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
break;
}
- case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
- // Build void *__kmpc_data_sharing_push_stack(size_t size,
+ case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
+ // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
// int16_t UseSharedMemory);
llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
auto *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(
- FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
+ FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
break;
}
case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
@@ -1484,6 +1780,46 @@ CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
break;
}
+ case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
+ // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
+ // const void *buf, size_t size, int16_t is_shared, const void **res);
+ llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
+ CGM.Int16Ty, CGM.VoidPtrPtrTy};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
+ break;
+ }
+ case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
+ // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
+ // int16_t is_shared);
+ llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
+ break;
+ }
+ case OMPRTL__kmpc_barrier: {
+ // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
+ cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ break;
+ }
+ case OMPRTL__kmpc_barrier_simple_spmd: {
+ // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ auto *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn =
+ CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
+ cast<llvm::Function>(RTLFn)->addFnAttr(llvm::Attribute::Convergent);
+ break;
+ }
}
return RTLFn;
}
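// A hedged sketch of how the paired entry points declared above
// (__kmpc_nvptx_teams_reduce_nowait_simple and
// __kmpc_nvptx_teams_end_reduce_nowait_simple) are expected to be used. The
// protocol and the kmp_critical_name definition are assumptions based only on
// the signatures built in this switch.
#include <cstdint>
struct ident_t;
typedef int32_t kmp_critical_name[8];
extern "C" int32_t __kmpc_nvptx_teams_reduce_nowait_simple(
    ident_t *Loc, int32_t GlobalTid, kmp_critical_name *Lck);
extern "C" void __kmpc_nvptx_teams_end_reduce_nowait_simple(
    ident_t *Loc, int32_t GlobalTid, kmp_critical_name *Lck);
static void teams_reduction_protocol_sketch(ident_t *Loc, int32_t GlobalTid,
                                            kmp_critical_name *Lck) {
  if (__kmpc_nvptx_teams_reduce_nowait_simple(Loc, GlobalTid, Lck)) {
    // ...combine this team's partial values into the global copies...
    __kmpc_nvptx_teams_end_reduce_nowait_simple(Loc, GlobalTid, Lck);
  }
}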
@@ -1530,6 +1866,37 @@ void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}
+namespace {
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+/// Enum for accessing the reserved_2 field of the ident_t struct.
+enum ModeFlagsTy : unsigned {
+ /// Bit set to 1 when in SPMD mode.
+ KMP_IDENT_SPMD_MODE = 0x01,
+ /// Bit set to 1 when a simplified runtime is used.
+ KMP_IDENT_SIMPLE_RT_MODE = 0x02,
+ LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
+};
+
+/// Special Undefined mode: the combination of non-SPMD mode and the simple runtime.
+static const ModeFlagsTy UndefinedMode =
+ (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
+} // anonymous namespace
+
+unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
+ switch (getExecutionMode()) {
+ case EM_SPMD:
+ if (requiresFullRuntime())
+ return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
+ return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
+ case EM_NonSPMD:
+ assert(requiresFullRuntime() && "Expected full runtime.");
+ return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
+ case EM_Unknown:
+ return UndefinedMode;
+ }
+ llvm_unreachable("Unknown flags are requested.");
+}
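// An illustrative decoder for the reserved_2 flags composed above; the helper
// names are hypothetical, only the two flag bits come from the ModeFlagsTy
// enum defined in the anonymous namespace.
static bool isSPMDModeFlag(unsigned Reserved2Flags) {
  return (Reserved2Flags & KMP_IDENT_SPMD_MODE) != 0;
}
static bool isSimpleRuntimeFlag(unsigned Reserved2Flags) {
  return (Reserved2Flags & KMP_IDENT_SIMPLE_RT_MODE) != 0;
}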
+
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, "_", "$") {
if (!CGM.getLangOpts().OpenMPIsDevice)
@@ -1581,12 +1948,15 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
}
} Action(IsInParallelRegion);
CodeGen.setAction(Action);
+ bool PrevIsInTTDRegion = IsInTTDRegion;
+ IsInTTDRegion = false;
bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
IsInTargetMasterThreadRegion = false;
auto *OutlinedFun =
cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen));
IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
+ IsInTTDRegion = PrevIsInTTDRegion;
if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
!IsInParallelRegion) {
llvm::Function *WrapperFun =
@@ -1597,26 +1967,106 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
return OutlinedFun;
}
+/// Get list of lastprivate variables from the teams distribute ... or
+/// teams {distribute ...} directives.
+static void
+getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
+ assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
+ "expected teams directive.");
+ const OMPExecutableDirective *Dir = &D;
+ if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
+ if (const Stmt *S = getSingleCompoundChild(
+ Ctx,
+ D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
+ /*IgnoreCaptured=*/true))) {
+ Dir = dyn_cast<OMPExecutableDirective>(S);
+ if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
+ Dir = nullptr;
+ }
+ }
+ if (!Dir)
+ return;
+ for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
+ for (const Expr *E : C->getVarRefs())
+ Vars.push_back(getPrivateItem(E));
+ }
+}
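// An illustrative example of a directive shape whose lastprivate variables
// this helper collects: a combined "teams distribute" directive (or a teams
// directive whose only child is a distribute directive). Compiled with
// -fopenmp; the variable names are arbitrary.
static int lastprivate_example_sketch() {
  int Last = 0;
#pragma omp target teams distribute lastprivate(Last)
  for (int I = 0; I < 128; ++I)
    Last = I;
  return Last;
}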
+
+/// Get list of reduction variables from the teams ... directives.
+static void
+getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
+ assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
+ "expected teams directive.");
+ for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
+ for (const Expr *E : C->privates())
+ Vars.push_back(getPrivateItem(E));
+ }
+}
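// An illustrative example of a teams directive whose reduction variables this
// helper collects; emitTeamsOutlinedFunction below globalizes these
// unconditionally in all modes. Compiled with -fopenmp.
static int teams_reduction_example_sketch(int N) {
  int Sum = 0;
#pragma omp target teams distribute reduction(+ : Sum)
  for (int I = 0; I < N; ++I)
    Sum += I;
  return Sum;
}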
+
llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
- SourceLocation Loc = D.getLocStart();
+ SourceLocation Loc = D.getBeginLoc();
+
+ const RecordDecl *GlobalizedRD = nullptr;
+ llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
+ // Globalize team reductions variable unconditionally in all modes.
+ getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
+ getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
+ if (!LastPrivatesReductions.empty()) {
+ GlobalizedRD = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), llvm::None, LastPrivatesReductions,
+ MappedDeclsFields);
+ }
+ } else if (!LastPrivatesReductions.empty()) {
+ assert(!TeamAndReductions.first &&
+ "Previous team declaration is not expected.");
+ TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
+ std::swap(TeamAndReductions.second, LastPrivatesReductions);
+ }
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
SourceLocation &Loc;
+ const RecordDecl *GlobalizedRD;
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields;
public:
- NVPTXPrePostActionTy(SourceLocation &Loc) : Loc(Loc) {}
+ NVPTXPrePostActionTy(
+ SourceLocation &Loc, const RecordDecl *GlobalizedRD,
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
+ &MappedDeclsFields)
+ : Loc(Loc), GlobalizedRD(GlobalizedRD),
+ MappedDeclsFields(MappedDeclsFields) {}
void Enter(CodeGenFunction &CGF) override {
- static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitGenericVarsProlog(CGF, Loc);
+ auto &Rt =
+ static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
+ if (GlobalizedRD) {
+ auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
+ I->getSecond().GlobalRecord = GlobalizedRD;
+ I->getSecond().MappedParams =
+ llvm::make_unique<CodeGenFunction::OMPMapVars>();
+ DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
+ for (const auto &Pair : MappedDeclsFields) {
+ assert(Pair.getFirst()->isCanonicalDecl() &&
+ "Expected canonical declaration");
+ Data.insert(std::make_pair(Pair.getFirst(),
+ MappedVarData(Pair.getSecond(),
+ /*IsOnePerTeam=*/true)));
+ }
+ }
+ Rt.emitGenericVarsProlog(CGF, Loc);
}
void Exit(CodeGenFunction &CGF) override {
static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
.emitGenericVarsEpilog(CGF);
}
- } Action(Loc);
+ } Action(Loc, GlobalizedRD, MappedDeclsFields);
CodeGen.setAction(Action);
llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen);
@@ -1629,8 +2079,10 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
}
void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
- SourceLocation Loc) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+ SourceLocation Loc,
+ bool WithSPMDCheck) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
+ getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
return;
CGBuilderTy &Bld = CGF.Builder;
@@ -1639,33 +2091,187 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
if (I == FunctionGlobalizedDecls.end())
return;
if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
- QualType RecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+ QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
+ QualType SecGlobalRecTy;
// Recover pointer to this function's global record. The runtime will
// handle the specifics of the allocation of the memory.
// Use actual memory size of the record including the padding
// for alignment purposes.
unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(RecTy).getQuantity();
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(RecTy).getQuantity();
+ CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
- // TODO: allow the usage of shared memory to be controlled by
- // the user, for now, default to global.
- llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
- GlobalRecordSizeArg);
- llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(RecTy)->getPointerTo());
+
+ llvm::PointerType *GlobalRecPtrTy =
+ CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
+ llvm::Value *GlobalRecCastAddr;
+ llvm::Value *IsTTD = nullptr;
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
+ llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
+ if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *PL = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
+ {RTLoc, ThreadID});
+ IsTTD = Bld.CreateIsNull(PL);
+ }
+ llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
+ Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(SPMDBB);
+ Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
+ CharUnits::fromQuantity(Alignment));
+ CGF.EmitBranch(ExitBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(NonSPMDBB);
+ llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
+ if (const RecordDecl *SecGlobalizedVarsRecord =
+ I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
+ SecGlobalRecTy =
+ CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
+
+ // Recover pointer to this function's global record. The runtime will
+ // handle the specifics of the allocation of the memory.
+ // Use actual memory size of the record including the padding
+ // for alignment purposes.
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
+ unsigned GlobalRecordSize =
+ CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
+ GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
+ Size = Bld.CreateSelect(
+ IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
+ }
+ // TODO: allow the usage of shared memory to be controlled by
+      // the user; for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, GlobalRecPtrTy);
+ CGF.EmitBlock(ExitBB);
+ auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
+ /*NumReservedValues=*/2, "_select_stack");
+ Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
+ Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
+ GlobalRecCastAddr = Phi;
+ I->getSecond().GlobalRecordAddr = Phi;
+ I->getSecond().IsInSPMDModeFlag = IsSPMD;
+ } else if (IsInTTDRegion) {
+ assert(GlobalizedRecords.back().Records.size() < 2 &&
+ "Expected less than 2 globalized records: one for target and one "
+ "for teams.");
+ unsigned Offset = 0;
+ for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
+ QualType RDTy = CGM.getContext().getRecordType(RD);
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
+ unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
+ Offset =
+ llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
+ }
+ unsigned Alignment =
+ CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
+ Offset = llvm::alignTo(Offset, Alignment);
+ GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
+ ++GlobalizedRecords.back().RegionCounter;
+ if (GlobalizedRecords.back().Records.size() == 1) {
+ assert(KernelStaticGlobalized &&
+ "Kernel static pointer must be initialized already.");
+ auto *UseSharedMemory = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$is_shared");
+ UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, Loc);
+ auto *StaticGlobalized = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
+ llvm::GlobalValue::CommonLinkage, nullptr);
+ auto *RecSize = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage, nullptr,
+ "_openmp_static_kernel$size");
+ RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ llvm::Value *Ld = CGF.EmitLoadOfScalar(
+ Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
+ CGM.getContext().getSizeType(), Loc);
+ llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ KernelStaticGlobalized, CGM.VoidPtrPtrTy);
+ llvm::Value *GlobalRecordSizeArg[] = {
+ llvm::ConstantInt::get(
+ CGM.Int16Ty,
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
+ StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_get_team_static_memory),
+ GlobalRecordSizeArg);
+ GlobalizedRecords.back().Buffer = StaticGlobalized;
+ GlobalizedRecords.back().RecSize = RecSize;
+ GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
+ GlobalizedRecords.back().Loc = Loc;
+ }
+ assert(KernelStaticGlobalized && "Global address must be set already.");
+ Address FrameAddr = CGF.EmitLoadOfPointer(
+ Address(KernelStaticGlobalized, CGM.getPointerAlign()),
+ CGM.getContext()
+ .getPointerType(CGM.getContext().VoidPtrTy)
+ .castAs<PointerType>());
+ llvm::Value *GlobalRecValue =
+ Bld.CreateConstInBoundsGEP(FrameAddr, Offset, CharUnits::One())
+ .getPointer();
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ I->getSecond().IsInSPMDModeFlag = nullptr;
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
+ } else {
+ // TODO: allow the usage of shared memory to be controlled by
+      // the user; for now, default to global.
+ llvm::Value *GlobalRecordSizeArg[] = {
+ llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
+ CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
+ llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
+ GlobalRecordSizeArg);
+ GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ GlobalRecValue, GlobalRecPtrTy);
+ I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ I->getSecond().IsInSPMDModeFlag = nullptr;
+ }
LValue Base =
- CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, RecTy);
- I->getSecond().GlobalRecordAddr = GlobalRecValue;
+ CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
// Emit the "global alloca" which is a GEP from the global declaration
// record using the pointer returned by the runtime.
+ LValue SecBase;
+ decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
+ if (IsTTD) {
+ SecIt = I->getSecond().SecondaryLocalVarData->begin();
+ llvm::PointerType *SecGlobalRecPtrTy =
+ CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
+ SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
+ Bld.CreatePointerBitCastOrAddrSpaceCast(
+ I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
+ SecGlobalRecTy);
+ }
for (auto &Rec : I->getSecond().LocalVarData) {
bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
llvm::Value *ParValue;
@@ -1675,14 +2281,51 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
}
- const FieldDecl *FD = Rec.second.first;
- LValue VarAddr = CGF.EmitLValueForField(Base, FD);
- Rec.second.second = VarAddr.getAddress();
+ LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
+      // Emit VarAddr based on lane-id if required.
+ QualType VarTy;
+ if (Rec.second.IsOnePerTeam) {
+ VarTy = Rec.second.FD->getType();
+ } else {
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
+ VarAddr.getAddress().getPointer(),
+ {Bld.getInt32(0), getNVPTXLaneID(CGF)});
+ VarTy =
+ Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
+ VarAddr = CGF.MakeAddrLValue(
+ Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
+ AlignmentSource::Decl);
+ }
+ Rec.second.PrivateAddr = VarAddr.getAddress();
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
+ assert(I->getSecond().IsInSPMDModeFlag &&
+ "Expected unknown execution mode or required SPMD check.");
+ if (IsTTD) {
+ assert(SecIt->second.IsOnePerTeam &&
+                 "Secondary globalized data must be one per team.");
+ LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
+ VarAddr.setAddress(
+ Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(),
+ VarAddr.getPointer()),
+ VarAddr.getAlignment()));
+ Rec.second.PrivateAddr = VarAddr.getAddress();
+ }
+ Address GlobalPtr = Rec.second.PrivateAddr;
+ Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
+ Rec.second.PrivateAddr = Address(
+ Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
+ LocalAddr.getPointer(), GlobalPtr.getPointer()),
+ LocalAddr.getAlignment());
+ }
if (EscapedParam) {
const auto *VD = cast<VarDecl>(Rec.first);
CGF.EmitStoreOfScalar(ParValue, VarAddr);
I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
}
+ if (IsTTD)
+ ++SecIt;
}
}
for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
@@ -1704,7 +2347,8 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
llvm::Value *GlobalRecordSizeArg[] = {
Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_push_stack),
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
GlobalRecordSizeArg);
llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
@@ -1718,8 +2362,10 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
I->getSecond().MappedParams->apply(CGF);
}
-void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
+void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
+ bool WithSPMDCheck) {
+ if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
+ getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
return;
const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
@@ -1734,9 +2380,48 @@ void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF) {
Addr);
}
if (I->getSecond().GlobalRecordAddr) {
- CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
- I->getSecond().GlobalRecordAddr);
+ if (!IsInTTDRegion &&
+ (WithSPMDCheck ||
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
+ llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
+ Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(NonSPMDBB);
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
+ CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
+ CGF.EmitBlock(ExitBB);
+ } else if (IsInTTDRegion) {
+ assert(GlobalizedRecords.back().RegionCounter > 0 &&
+ "region counter must be > 0.");
+ --GlobalizedRecords.back().RegionCounter;
+ // Emit the restore function only in the target region.
+ if (GlobalizedRecords.back().RegionCounter == 0) {
+ QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/16, /*Signed=*/0);
+ llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
+ Address(GlobalizedRecords.back().UseSharedMemory,
+ CGM.getContext().getTypeAlignInChars(Int16Ty)),
+ /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
+ llvm::Value *Args[] = {
+ llvm::ConstantInt::get(
+ CGM.Int16Ty,
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
+ IsInSharedMemory};
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_restore_team_static_memory),
+ Args);
+ }
+ } else {
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
+ I->getSecond().GlobalRecordAddr);
+ }
}
}
}
@@ -1830,7 +2515,7 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
// passed from the outside of the target region.
CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
- // There's somehting to share.
+ // There's something to share.
if (!CapturedVars.empty()) {
// Prepare for parallel region. Indicate the outlined function.
Address SharedArgs =
@@ -1884,30 +2569,24 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
Work.emplace_back(WFn);
};
- auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen, &CodeGen,
- &ThreadIDAddr](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
- RegionCodeGenTy RCG(CodeGen);
+ auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
if (IsInParallelRegion) {
SeqGen(CGF, Action);
} else if (IsInTargetMasterThreadRegion) {
L0ParallelGen(CGF, Action);
- } else if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_NonSPMD) {
- RCG(CGF);
} else {
// Check for master and then parallelism:
// if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
- // Serialized execution.
- // } else if (master) {
- // Worker call.
+ // Serialized execution.
// } else {
- // Outlined function call.
+ // Worker call.
// }
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
- llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
+ llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
@@ -1920,29 +2599,17 @@ void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
{RTLoc, ThreadID});
llvm::Value *Res = Bld.CreateIsNotNull(PL);
- Bld.CreateCondBr(Res, SeqBB, MasterCheckBB);
+ Bld.CreateCondBr(Res, SeqBB, MasterBB);
CGF.EmitBlock(SeqBB);
SeqGen(CGF, Action);
CGF.EmitBranch(ExitBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(MasterCheckBB);
- llvm::BasicBlock *MasterThenBB = CGF.createBasicBlock("master.then");
- llvm::BasicBlock *ElseBlock = CGF.createBasicBlock("omp_if.else");
- llvm::Value *IsMaster =
- Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
- Bld.CreateCondBr(IsMaster, MasterThenBB, ElseBlock);
- CGF.EmitBlock(MasterThenBB);
+ CGF.EmitBlock(MasterBB);
L0ParallelGen(CGF, Action);
CGF.EmitBranch(ExitBB);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ElseBlock);
- // In the worker need to use the real thread id.
- ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- RCG(CGF);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
// Emit the continuation block for code after the if.
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
@@ -2013,6 +2680,34 @@ void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
}
}
+void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+ // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
+ // This function does not use parameters, so we can emit just default values.
+ llvm::Value *Args[] = {
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(getIdentTyPointerTy())),
+ llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
+ CGF.EmitRuntimeCall(
+ createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
+}
+
+void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool,
+ bool) {
+ // Always emit simple barriers!
+ if (!CGF.HaveInsertPoint())
+ return;
+  // Build call __kmpc_barrier(loc, thread_id);
+ unsigned Flags = getDefaultFlagsForBarriers(Kind);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
+ getThreadID(CGF, Loc)};
+ CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
+}
+
void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
@@ -2055,14 +2750,16 @@ void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
CGF.EmitBlock(BodyBB);
// Output the critical statement.
- CriticalOpGen(CGF);
+ CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
+ Hint);
// After the body surrounded by the critical region, the single executing
// thread will jump to the synchronisation point.
// Block waits for all threads in current team to finish then increments the
// counter variable and returns to the loop.
CGF.EmitBlock(SyncBB);
- getNVPTXCTABarrier(CGF);
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
llvm::Value *IncCounterVal =
CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
@@ -2184,11 +2881,12 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
IntType, Offset, Loc);
CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
- Ptr = Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
- ElemPtr =
+ Address LocalPtr =
+ Bld.CreateConstGEP(Ptr, 1, CharUnits::fromQuantity(IntSize));
+ Address LocalElemPtr =
Bld.CreateConstGEP(ElemPtr, 1, CharUnits::fromQuantity(IntSize));
- PhiSrc->addIncoming(Ptr.getPointer(), ThenBB);
- PhiDest->addIncoming(ElemPtr.getPointer(), ThenBB);
+ PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
+ PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
CGF.EmitBranch(PreCondBB);
CGF.EmitBlock(ExitBB);
} else {
@@ -2414,235 +3112,18 @@ static void emitReductionListCopy(
}
}
-/// This function emits a helper that loads data from the scratchpad array
-/// and (optionally) reduces it with the input operand.
-///
-/// load_and_reduce(local, scratchpad, index, width, should_reduce)
-/// reduce_data remote;
-/// for elem in remote:
-/// remote.elem = Scratchpad[elem_id][index]
-/// if (should_reduce)
-/// local = local @ remote
-/// else
-/// local = remote
-static llvm::Value *emitReduceScratchpadFunction(
- CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy, llvm::Value *ReduceFn, SourceLocation Loc) {
- ASTContext &C = CGM.getContext();
- QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
-
- // Destination of the copy.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Base address of the scratchpad array, with each element storing a
- // Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // A source index into the scratchpad array.
- ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // Row width of an element in the scratchpad array, typically
- // the number of teams.
- ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // If should_reduce == 1, then it's load AND reduce,
- // If should_reduce == 0 (or otherwise), then it only loads (+ copy).
- // The latter case is used for initialization.
- ImplicitParamDecl ShouldReduceArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- Int32Ty, ImplicitParamDecl::Other);
-
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&ScratchPadArg);
- Args.push_back(&IndexArg);
- Args.push_back(&WidthArg);
- Args.push_back(&ShouldReduceArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_load_and_reduce", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- // Get local Reduce list pointer.
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address ReduceListAddr(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
- llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
-
- Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
- CGM.SizeTy, /*isSigned=*/true);
-
- Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
- llvm::Value *WidthVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false, Int32Ty, Loc),
- CGM.SizeTy, /*isSigned=*/true);
-
- Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
- llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
- AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, Loc);
-
- // The absolute ptr address to the base addr of the next element to copy.
- llvm::Value *CumulativeElemBasePtr =
- Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
- Address SrcDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
-
- // Create a Remote Reduce list to store the elements read from the
- // scratchpad array.
- Address RemoteReduceList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_red_list");
-
- // Assemble remote Reduce list from scratchpad array.
- emitReductionListCopy(ScratchpadToThread, CGF, ReductionArrayTy, Privates,
- SrcDataAddr, RemoteReduceList,
- {/*RemoteLaneOffset=*/nullptr,
- /*ScratchpadIndex=*/IndexVal,
- /*ScratchpadWidth=*/WidthVal});
-
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- llvm::Value *CondReduce = Bld.CreateIsNotNull(ShouldReduceVal);
- Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
-
- CGF.EmitBlock(ThenBB);
- // We should reduce with the local Reduce list.
- // reduce_function(LocalReduceList, RemoteReduceList)
- llvm::Value *LocalDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ReduceListAddr.getPointer(), CGF.VoidPtrTy);
- llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- RemoteReduceList.getPointer(), CGF.VoidPtrTy);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
- CGF, Loc, ReduceFn, {LocalDataPtr, RemoteDataPtr});
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- // No reduction; just copy:
- // Local Reduce list = Remote Reduce list.
- emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
- RemoteReduceList, ReduceListAddr);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
-
- CGF.FinishFunction();
- return Fn;
-}
-
-/// This function emits a helper that stores reduced data from the team
-/// master to a scratchpad array in global memory.
-///
-/// for elem in Reduce List:
-/// scratchpad[elem_id][index] = elem
-///
-static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
- ArrayRef<const Expr *> Privates,
- QualType ReductionArrayTy,
- SourceLocation Loc) {
-
- ASTContext &C = CGM.getContext();
- QualType Int32Ty = C.getIntTypeForBitwidth(32, /*Signed=*/1);
-
- // Source of the copy.
- ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // Base address of the scratchpad array, with each element storing a
- // Reduce list per team.
- ImplicitParamDecl ScratchPadArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
- // A destination index into the scratchpad array, typically the team
- // identifier.
- ImplicitParamDecl IndexArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
- // Row width of an element in the scratchpad array, typically
- // the number of teams.
- ImplicitParamDecl WidthArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int32Ty,
- ImplicitParamDecl::Other);
-
- FunctionArgList Args;
- Args.push_back(&ReduceListArg);
- Args.push_back(&ScratchPadArg);
- Args.push_back(&IndexArg);
- Args.push_back(&WidthArg);
-
- const CGFunctionInfo &CGFI =
- CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- auto *Fn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- "_omp_reduction_copy_to_scratchpad", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
- Fn->setDoesNotRecurse();
- CodeGenFunction CGF(CGM);
- CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
-
- CGBuilderTy &Bld = CGF.Builder;
-
- Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
- Address SrcDataAddr(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
-
- Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
- llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
- AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
-
- Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
- llvm::Value *IndexVal = Bld.CreateIntCast(
- CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false, Int32Ty, Loc),
- CGF.SizeTy, /*isSigned=*/true);
-
- Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
- llvm::Value *WidthVal =
- Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
- Int32Ty, SourceLocation()),
- CGF.SizeTy, /*isSigned=*/true);
-
- // The absolute ptr address to the base addr of the next element to copy.
- llvm::Value *CumulativeElemBasePtr =
- Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
- Address DestDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
-
- emitReductionListCopy(ThreadToScratchpad, CGF, ReductionArrayTy, Privates,
- SrcDataAddr, DestDataAddr,
- {/*RemoteLaneOffset=*/nullptr,
- /*ScratchpadIndex=*/IndexVal,
- /*ScratchpadWidth=*/WidthVal});
-
- CGF.FinishFunction();
- return Fn;
-}
-
/// This function emits a helper that gathers Reduce lists from the first
/// lane of every active warp to lanes in the first warp.
///
/// void inter_warp_copy_func(void* reduce_data, num_warps)
/// shared smem[warp_size];
/// For all data entries D in reduce_data:
+/// sync
/// If (I am the first lane in each warp)
/// Copy my local D to smem[warp_id]
/// sync
/// if (I am the first warp)
/// Copy smem[thread_id] to my local D
-/// sync
static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
ArrayRef<const Expr *> Privates,
QualType ReductionArrayTy,
@@ -2688,11 +3169,10 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::GlobalVariable *TransferMedium =
M.getGlobalVariable(TransferMediumName);
if (!TransferMedium) {
- auto *Ty = llvm::ArrayType::get(CGM.Int64Ty, WarpSize);
+ auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
TransferMedium = new llvm::GlobalVariable(
- M, Ty,
- /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
+ M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
llvm::Constant::getNullValue(Ty), TransferMediumName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
SharedAddressSpace);
@@ -2710,7 +3190,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
- C.VoidPtrTy, SourceLocation()),
+ C.VoidPtrTy, Loc),
CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
CGF.getPointerAlign());
@@ -2720,121 +3200,151 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// Warp master copies reduce element to transfer medium in __shared__
// memory.
//
- llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
-
- // if (lane_id == 0)
- llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
- Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
- CGF.EmitBlock(ThenBB);
-
- // Reduce element = LocalReduceList[i]
- Address ElemPtrPtrAddr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
- llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
- ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- // elemptr = (type[i]*)(elemptrptr)
- Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
- ElemPtr = Bld.CreateElementBitCast(
- ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // Get pointer to location in transfer medium.
- // MediumPtr = &medium[warp_id]
- llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- Address MediumPtr(MediumPtrVal, C.getTypeAlignInChars(Private->getType()));
- // Casting to actual data type.
- // MediumPtr = (type[i]*)MediumPtrAddr;
- MediumPtr = Bld.CreateElementBitCast(
- MediumPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // elem = *elemptr
- //*MediumPtr = elem
- if (Private->getType()->isScalarType()) {
- llvm::Value *Elem = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
- Private->getType(), Loc);
- // Store the source element value to the dest element address.
- CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/false,
- Private->getType());
- } else {
- CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
- CGF.MakeAddrLValue(MediumPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- }
-
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(ElseBB);
- Bld.CreateBr(MergeBB);
-
- CGF.EmitBlock(MergeBB);
+ unsigned RealTySize =
+ C.getTypeSizeInChars(Private->getType())
+ .alignTo(C.getTypeAlignInChars(Private->getType()))
+ .getQuantity();
+    for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
+ unsigned NumIters = RealTySize / TySize;
+ if (NumIters == 0)
+ continue;
+ QualType CType = C.getIntTypeForBitwidth(
+ C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
+ llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
+ CharUnits Align = CharUnits::fromQuantity(TySize);
+ llvm::Value *Cnt = nullptr;
+ Address CntAddr = Address::invalid();
+ llvm::BasicBlock *PrecondBB = nullptr;
+ llvm::BasicBlock *ExitBB = nullptr;
+ if (NumIters > 1) {
+ CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
+ CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
+ /*Volatile=*/false, C.IntTy);
+ PrecondBB = CGF.createBasicBlock("precond");
+ ExitBB = CGF.createBasicBlock("exit");
+ llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
+ // There is no need to emit line number for unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(PrecondBB);
+ Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
+ llvm::Value *Cmp =
+ Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
+ Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
+ CGF.EmitBlock(BodyBB);
+ }
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
+
+ // if (lane_id == 0)
+ llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
+ Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
+ CGF.EmitBlock(ThenBB);
- Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
- llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
- AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());
+ // Reduce element = LocalReduceList[i]
+ Address ElemPtrPtrAddr =
+ Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
+ ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
+ // elemptr = ((CopyType*)(elemptrptr)) + I
+ Address ElemPtr = Address(ElemPtrPtr, Align);
+ ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
+ if (NumIters > 1) {
+ ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
+ ElemPtr.getAlignment());
+ }
- llvm::Value *NumActiveThreads = Bld.CreateNSWMul(
- NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
- // named_barrier_sync(ParallelBarrierID, num_active_threads)
- syncParallelThreads(CGF, NumActiveThreads);
+ // Get pointer to location in transfer medium.
+ // MediumPtr = &medium[warp_id]
+ llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
+ Address MediumPtr(MediumPtrVal, Align);
+ // Casting to actual data type.
+ // MediumPtr = (CopyType*)MediumPtrAddr;
+ MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
+
+ // elem = *elemptr
+ //*MediumPtr = elem
+ llvm::Value *Elem =
+ CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
+ // Store the source element value to the dest element address.
+ CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
+
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(ElseBB);
+ Bld.CreateBr(MergeBB);
+
+ CGF.EmitBlock(MergeBB);
+
+ // kmpc_barrier.
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
+ /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+
+ //
+ // Warp 0 copies reduce element from transfer medium.
+ //
+ llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
+ llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
+ llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
+
+ Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
+ llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
+ AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
+
+ // Up to 32 threads in warp 0 are active.
+ llvm::Value *IsActiveThread =
+ Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
+ Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
+
+ CGF.EmitBlock(W0ThenBB);
+
+ // SrcMediumPtr = &medium[tid]
+ llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
+ TransferMedium,
+ {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
+ Address SrcMediumPtr(SrcMediumPtrVal, Align);
+ // SrcMediumVal = *SrcMediumPtr;
+ SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
+
+ // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
+ Address TargetElemPtrPtr =
+ Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
+ llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
+ TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
+ Address TargetElemPtr = Address(TargetElemPtrVal, Align);
+ TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
+ if (NumIters > 1) {
+ TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
+ TargetElemPtr.getAlignment());
+ }
- //
- // Warp 0 copies reduce element from transfer medium.
- //
- llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
- llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
- llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
-
- // Up to 32 threads in warp 0 are active.
- llvm::Value *IsActiveThread =
- Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
- Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
-
- CGF.EmitBlock(W0ThenBB);
-
- // SrcMediumPtr = &medium[tid]
- llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- Address SrcMediumPtr(SrcMediumPtrVal,
- C.getTypeAlignInChars(Private->getType()));
- // SrcMediumVal = *SrcMediumPtr;
- SrcMediumPtr = Bld.CreateElementBitCast(
- SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // TargetElemPtr = (type[i]*)(SrcDataAddr[i])
- Address TargetElemPtrPtr =
- Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
- llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
- TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
- Address TargetElemPtr =
- Address(TargetElemPtrVal, C.getTypeAlignInChars(Private->getType()));
- TargetElemPtr = Bld.CreateElementBitCast(
- TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));
-
- // *TargetElemPtr = SrcMediumVal;
- if (Private->getType()->isScalarType()) {
- llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
- SrcMediumPtr, /*Volatile=*/false, Private->getType(), Loc);
+ // *TargetElemPtr = SrcMediumVal;
+ llvm::Value *SrcMediumValue =
+ CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
- Private->getType());
- } else {
- CGF.EmitAggregateCopy(
- CGF.MakeAddrLValue(SrcMediumPtr, Private->getType()),
- CGF.MakeAddrLValue(TargetElemPtr, Private->getType()),
- Private->getType(), AggValueSlot::DoesNotOverlap);
- }
- Bld.CreateBr(W0MergeBB);
+ CType);
+ Bld.CreateBr(W0MergeBB);
- CGF.EmitBlock(W0ElseBB);
- Bld.CreateBr(W0MergeBB);
+ CGF.EmitBlock(W0ElseBB);
+ Bld.CreateBr(W0MergeBB);
- CGF.EmitBlock(W0MergeBB);
+ CGF.EmitBlock(W0MergeBB);
- // While warp 0 copies values from transfer medium, all other warps must
- // wait.
- syncParallelThreads(CGF, NumActiveThreads);
+ if (NumIters > 1) {
+ Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
+ CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
+ CGF.EmitBranch(PrecondBB);
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB);
+ }
+ RealTySize %= TySize;
+ }
++Idx;
}
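// A standalone sketch of the chunking schedule implemented by the loop above:
// given the padded element size in bytes, it prints the (iteration count,
// chunk size) pairs the copy walks through. Purely illustrative; the helper
// name is hypothetical.
#include <cstdio>
static void print_chunk_schedule_sketch(unsigned RealTySize) {
  for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
    unsigned NumIters = RealTySize / TySize;
    if (NumIters == 0)
      continue;
    std::printf("copy %u chunk(s) of %u byte(s)\n", NumIters, TySize);
    RealTySize %= TySize;
  }
}
// For example, a 7-byte element is moved as one 4-byte, one 2-byte and one
// 1-byte chunk, each round-tripped through the 32-bit shared transfer medium
// with __kmpc_barrier calls synchronizing the producer and consumer warps.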
@@ -3103,7 +3613,7 @@ static llvm::Value *emitShuffleAndReduceFunction(
/// 3. Call the OpenMP runtime on the GPU to reduce within a team
/// and store the result on the team master:
///
-/// __kmpc_nvptx_parallel_reduce_nowait(...,
+/// __kmpc_nvptx_parallel_reduce_nowait_v2(...,
/// reduceData, shuffleReduceFn, interWarpCpyFn)
///
/// where:
@@ -3274,7 +3784,7 @@ static llvm::Value *emitShuffleAndReduceFunction(
/// Intra-Team Reduction
///
/// This function, as implemented in the runtime call
-/// '__kmpc_nvptx_parallel_reduce_nowait', aggregates data across OpenMP
+/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
/// threads in a team. It first reduces within a warp using the
/// aforementioned algorithms. We then proceed to gather all such
/// reduced values at the first warp.
@@ -3297,7 +3807,7 @@ static llvm::Value *emitShuffleAndReduceFunction(
/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e.,
/// the k'th worker reduces every k'th element.
///
-/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait' to
+/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
/// reduce across workers and compute a globally reduced value.
///
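
For orientation, the parallel-reduction call that emitReduction builds below has roughly the following shape. This is a sketch inferred from this patch only; the C prototype, parameter names and callback signatures are assumptions for illustration, not an authoritative declaration of the runtime entry point.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical prototype; the argument order is taken from the Args array
    // built in emitReduction below, and the callback parameter lists mirror the
    // emitted shuffle-and-reduce and inter-warp-copy helpers.
    extern "C" int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
        void *Loc,          // ident_t * carrying the SPMD/runtime-mode flags
        int32_t GlobalTid,  // global thread id
        int32_t NumVars,    // number of reduction variables
        size_t ReduceSize,  // sizeof(RedList)
        void *ReduceData,   // void *RedList[NumVars]
        void (*ShuffleReduceFn)(void *, int16_t, int16_t, int16_t),
        void (*InterWarpCpyFn)(void *, int32_t));
    // A return value of 1 means the calling thread owns the reduced values and
    // must run the reduction ops before signalling completion (see the
    // "if (res == 1)" block emitted below).
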
void CGOpenMPRuntimeNVPTX::emitReduction(
@@ -3308,125 +3818,116 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
return;
bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
+#ifndef NDEBUG
bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
- bool SimdReduction = isOpenMPSimdDirective(Options.ReductionKind);
- assert((TeamsReduction || ParallelReduction || SimdReduction) &&
- "Invalid reduction selection in emitReduction.");
+#endif
if (Options.SimpleReduction) {
+ assert(!TeamsReduction && !ParallelReduction &&
+ "Invalid reduction selection in emitReduction.");
CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
ReductionOps, Options);
return;
}
- ASTContext &C = CGM.getContext();
-
- // 1. Build a list of reduction variables.
- // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
- auto Size = RHSExprs.size();
- for (const Expr *E : Privates) {
- if (E->getType()->isVariablyModifiedType())
- // Reserve place for array size.
- ++Size;
- }
- llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Address ReductionList =
- CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
- unsigned Idx = 0;
- for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
- Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- CGF.Builder.CreateStore(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
- Elem);
- if ((*IPriv)->getType()->isVariablyModifiedType()) {
- // Store array size.
- ++Idx;
- Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
- CGF.getPointerSize());
- llvm::Value *Size = CGF.Builder.CreateIntCast(
- CGF.getVLASize(
- CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
- .NumElts,
- CGF.SizeTy, /*isSigned=*/false);
- CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
- Elem);
- }
- }
-
- // 2. Emit reduce_func().
- llvm::Value *ReductionFn = emitReductionFunction(
- CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
- Privates, LHSExprs, RHSExprs, ReductionOps);
+ assert((TeamsReduction || ParallelReduction) &&
+ "Invalid reduction selection in emitReduction.");
- // 4. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
+ // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
// RedList, shuffle_reduce_func, interwarp_copy_func);
+ // or
+ // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReductionList.getPointer(), CGF.VoidPtrTy);
-
- llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
- CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
- llvm::Value *InterWarpCopyFn =
- emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
-
- llvm::Value *Args[] = {ThreadId,
- CGF.Builder.getInt32(RHSExprs.size()),
- ReductionArrayTySize,
- RL,
- ShuffleAndReduceFn,
- InterWarpCopyFn};
-
- llvm::Value *Res = nullptr;
- if (ParallelReduction)
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
- Args);
- else if (SimdReduction)
- Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_simd_reduce_nowait),
- Args);
- if (TeamsReduction) {
- llvm::Value *ScratchPadCopyFn =
- emitCopyToScratchpad(CGM, Privates, ReductionArrayTy, Loc);
- llvm::Value *LoadAndReduceFn = emitReduceScratchpadFunction(
+ llvm::Value *Res;
+ if (ParallelReduction) {
+ ASTContext &C = CGM.getContext();
+ // 1. Build a list of reduction variables.
+ // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
+ auto Size = RHSExprs.size();
+ for (const Expr *E : Privates) {
+ if (E->getType()->isVariablyModifiedType())
+ // Reserve place for array size.
+ ++Size;
+ }
+ llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
+ QualType ReductionArrayTy =
+ C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address ReductionList =
+ CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
+ auto IPriv = Privates.begin();
+ unsigned Idx = 0;
+ for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
+ Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
+ Elem);
+ if ((*IPriv)->getType()->isVariablyModifiedType()) {
+ // Store array size.
+ ++Idx;
+ Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
+ CGF.getPointerSize());
+ llvm::Value *Size = CGF.Builder.CreateIntCast(
+ CGF.getVLASize(
+ CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
+ .NumElts,
+ CGF.SizeTy, /*isSigned=*/false);
+ CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
+ Elem);
+ }
+ }
+
+ llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
+ llvm::Value *ReductionFn = emitReductionFunction(
+ CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
+ llvm::Value *InterWarpCopyFn =
+ emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
- llvm::Value *Args[] = {ThreadId,
+ llvm::Value *Args[] = {RTLoc,
+ ThreadId,
CGF.Builder.getInt32(RHSExprs.size()),
ReductionArrayTySize,
RL,
ShuffleAndReduceFn,
- InterWarpCopyFn,
- ScratchPadCopyFn,
- LoadAndReduceFn};
+ InterWarpCopyFn};
+
+ Res = CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_parallel_reduce_nowait_v2),
+ Args);
+ } else {
+ assert(TeamsReduction && "expected teams reduction.");
+ std::string Name = getName({"reduction"});
+ llvm::Value *Lock = getCriticalRegionLock(Name);
+ llvm::Value *Args[] = {RTLoc, ThreadId, Lock};
Res = CGF.EmitRuntimeCall(
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_teams_reduce_nowait),
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_simple),
Args);
}
- // 5. Build switch(res)
- llvm::BasicBlock *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
- llvm::SwitchInst *SwInst =
- CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);
+ // 5. Build if (res == 1)
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
+ llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
+ llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
+ Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
+ CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
- // 6. Build case 1: where we have reduced values in the master
+ // 6. Build then branch: where we have reduced values in the master
// thread in each team.
// __kmpc_end_reduce{_nowait}(<gtid>);
// break;
- llvm::BasicBlock *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
- SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
- CGF.EmitBlock(Case1BB);
+ CGF.EmitBlock(ThenBB);
// Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
- llvm::Value *EndArgs[] = {ThreadId};
auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
this](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto IPriv = Privates.begin();
@@ -3440,15 +3941,33 @@ void CGOpenMPRuntimeNVPTX::emitReduction(
++IRHS;
}
};
- RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
- EndArgs);
- RCG.setAction(Action);
- RCG(CGF);
- CGF.EmitBranch(DefaultBB);
- CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
+ if (ParallelReduction) {
+ llvm::Value *EndArgs[] = {ThreadId};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
+ } else {
+ assert(TeamsReduction && "expected teams reduction.");
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ std::string Name = getName({"reduction"});
+ llvm::Value *Lock = getCriticalRegionLock(Name);
+ llvm::Value *EndArgs[] = {RTLoc, ThreadId, Lock};
+ RegionCodeGenTy RCG(CodeGen);
+ NVPTXActionTy Action(
+ nullptr, llvm::None,
+ createNVPTXRuntimeFunction(
+ OMPRTL_NVPTX__kmpc_nvptx_teams_end_reduce_nowait_simple),
+ EndArgs);
+ RCG.setAction(Action);
+ RCG(CGF);
+ }
+  // There is no need to emit a line number for the unconditional branch.
+ (void)ApplyDebugLocation::CreateEmpty(CGF);
+ CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
const VarDecl *
@@ -3478,7 +3997,7 @@ CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
return ParmVarDecl::Create(
CGM.getContext(),
const_cast<DeclContext *>(NativeParam->getDeclContext()),
- NativeParam->getLocStart(), NativeParam->getLocation(),
+ NativeParam->getBeginLoc(), NativeParam->getLocation(),
NativeParam->getIdentifier(), ArgType,
/*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}
@@ -3556,10 +4075,10 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
QualType Int32QTy =
Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
- ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
+ ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
/*Id=*/nullptr, Int16QTy,
ImplicitParamDecl::Other);
- ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getLocStart(),
+ ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
/*Id=*/nullptr, Int32QTy,
ImplicitParamDecl::Other);
WrapperArgs.emplace_back(&ParallelLevelArg);
@@ -3577,7 +4096,7 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
- D.getLocStart(), D.getLocStart());
+ D.getBeginLoc(), D.getBeginLoc());
const auto *RD = CS.getCapturedRecordDecl();
auto CurField = RD->field_begin();
@@ -3662,7 +4181,7 @@ llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
}
}
- emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedParallelFn, Args);
+ emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
CGF.FinishFunction();
return Fn;
}
@@ -3675,6 +4194,8 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
assert(D && "Expected function or captured|block decl.");
assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
"Function is registered already.");
+ assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
+ "Team is set but not processed.");
const Stmt *Body = nullptr;
bool NeedToDelayGlobalization = false;
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
@@ -3684,12 +4205,18 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
} else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
Body = CD->getBody();
NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
+ if (NeedToDelayGlobalization &&
+ getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
+ return;
}
if (!Body)
return;
- CheckVarsEscapingDeclContext VarChecker(CGF);
+ CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
VarChecker.Visit(Body);
- const RecordDecl *GlobalizedVarsRecord = VarChecker.getGlobalizedRecord();
+ const RecordDecl *GlobalizedVarsRecord =
+ VarChecker.getGlobalizedRecord(IsInTTDRegion);
+ TeamAndReductions.first = nullptr;
+ TeamAndReductions.second.clear();
ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
VarChecker.getEscapedVariableLengthDecls();
if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
@@ -3707,16 +4234,30 @@ void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(std::make_pair(VD, std::make_pair(FD, Address::invalid())));
+ Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
+ }
+ if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
+ CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
+ VarChecker.Visit(Body);
+ I->getSecond().SecondaryGlobalRecord =
+ VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
+ I->getSecond().SecondaryLocalVarData.emplace();
+ DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
+ for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
+ assert(VD->isCanonicalDecl() && "Expected canonical declaration");
+ const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
+ Data.insert(
+ std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
+ }
}
if (!NeedToDelayGlobalization) {
- emitGenericVarsProlog(CGF, D->getLocStart());
+ emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
struct GlobalizationScope final : EHScopeStack::Cleanup {
GlobalizationScope() = default;
void Emit(CodeGenFunction &CGF, Flags flags) override {
static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
- .emitGenericVarsEpilog(CGF);
+ .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
}
};
CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
@@ -3734,7 +4275,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
return Address::invalid();
auto VDI = I->getSecond().LocalVarData.find(VD);
if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.second;
+ return VDI->second.PrivateAddr;
if (VD->hasAttrs()) {
for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
E(VD->attr_end());
@@ -3743,7 +4284,7 @@ Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
->getCanonicalDecl());
if (VDI != I->getSecond().LocalVarData.end())
- return VDI->second.second;
+ return VDI->second.PrivateAddr;
}
}
return Address::invalid();
@@ -3753,3 +4294,311 @@ void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
FunctionGlobalizedDecls.erase(CGF.CurFn);
CGOpenMPRuntime::functionFinished(CGF);
}
+
+void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ OpenMPDistScheduleClauseKind &ScheduleKind,
+ llvm::Value *&Chunk) const {
+ if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
+ ScheduleKind = OMPC_DIST_SCHEDULE_static;
+ Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ S.getIterationVariable()->getType(), S.getBeginLoc());
+ return;
+ }
+ CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
+ CGF, S, ScheduleKind, Chunk);
+}
+
+void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ OpenMPScheduleClauseKind &ScheduleKind,
+ const Expr *&ChunkExpr) const {
+ ScheduleKind = OMPC_SCHEDULE_static;
+ // Chunk size is 1 in this case.
+ llvm::APInt ChunkSize(32, 1);
+ ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
+ CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
+ SourceLocation());
+}
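
To illustrate the two defaults implemented above (a user-level sketch, not from the patch): in SPMD mode dist_schedule defaults to static with a chunk equal to the number of threads in the block, and schedule defaults to static with chunk 1, so a combined directive behaves as if those clauses had been written explicitly.

    // Illustrative only; function and variable names are made up. With the
    // defaults above, in SPMD mode this directive behaves as if it carried
    // dist_schedule(static, <threads-per-block>) schedule(static, 1).
    void saxpy(int n, float a, const float *x, float *y) {
    #pragma omp target teams distribute parallel for map(to: x[0:n]) map(tofrom: y[0:n])
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }
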
+
+void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
+ assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
+ " Expected target-based directive.");
+ const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
+ for (const CapturedStmt::Capture &C : CS->captures()) {
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (!C.capturesVariable())
+ continue;
+ const VarDecl *VD = C.getCapturedVar();
+ const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl();
+ if (!RD || !RD->isLambda())
+ continue;
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ LValue VDLVal;
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
+ else
+ VDLVal = CGF.MakeAddrLValue(
+ VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture = nullptr;
+ RD->getCaptureFields(Captures, ThisCapture);
+ if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
+ LValue ThisLVal =
+ CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
+ llvm::Value *CXXThis = CGF.LoadCXXThis();
+ CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
+ }
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() != LCK_ByRef)
+ continue;
+ const VarDecl *VD = LC.getCapturedVar();
+ if (!CS->capturesVariable(VD))
+ continue;
+ auto It = Captures.find(VD);
+ assert(It != Captures.end() && "Found lambda capture without field.");
+ LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
+ Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ if (VD->getType().getCanonicalType()->isReferenceType())
+ VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
+ VD->getType().getCanonicalType())
+ .getAddress();
+ CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
+ }
+ }
+}
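
A small user-level example (illustrative, not from the patch) of the situation this helper handles: a lambda with a by-reference capture used inside a target region, where the capture field must be re-pointed at the device copy of the captured variable.

    void useLambdaOnDevice() {
      int x = 0;
      auto addOne = [&x] { x += 1; }; // by-reference capture of x
    #pragma omp target map(tofrom: x)
      {
        // Without the adjustment above, the capture inside 'addOne' would
        // still hold the host address of 'x' rather than the device instance.
        addOne();
      }
    }
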
+
+// Get current CudaArch and ignore any unknown values
+static CudaArch getCudaArch(CodeGenModule &CGM) {
+ if (!CGM.getTarget().hasFeature("ptx"))
+ return CudaArch::UNKNOWN;
+ llvm::StringMap<bool> Features;
+ CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
+ CGM.getTarget().getTargetOpts().CPU,
+ CGM.getTarget().getTargetOpts().Features);
+ for (const auto &Feature : Features) {
+ if (Feature.getValue()) {
+ CudaArch Arch = StringToCudaArch(Feature.getKey());
+ if (Arch != CudaArch::UNKNOWN)
+ return Arch;
+ }
+ }
+ return CudaArch::UNKNOWN;
+}
+
+/// Check whether the target architecture supports unified addressing, which
+/// is required by the OpenMP 'requires' clause "unified_shared_memory".
+void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
+ CodeGenModule &CGM, const OMPRequiresDecl *D) const {
+ for (const OMPClause *Clause : D->clauselists()) {
+ if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
+ switch (getCudaArch(CGM)) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
+ CGM.Error(Clause->getBeginLoc(),
+ "Target architecture does not support unified addressing");
+ return;
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ }
+ }
+}
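
Illustrative source (not from the patch) that this check diagnoses: compiling it for one of the sm_2x-sm_6x architectures listed above triggers the new error, while sm_70 and later are accepted.

    #pragma omp requires unified_shared_memory

    int readThroughHostPointer(int *p) {
      int v = 0;
    #pragma omp target map(tofrom: v)
      v = *p; // relies on host pointers being dereferenceable on the device
      return v;
    }
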
+
+/// Get number of SMs and number of blocks per SM.
+static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
+ std::pair<unsigned, unsigned> Data;
+ if (CGM.getLangOpts().OpenMPCUDANumSMs)
+ Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
+ if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
+ Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
+ if (Data.first && Data.second)
+ return Data;
+ switch (getCudaArch(CGM)) {
+ case CudaArch::SM_20:
+ case CudaArch::SM_21:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ case CudaArch::SM_50:
+ case CudaArch::SM_52:
+ case CudaArch::SM_53:
+ return {16, 16};
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
+ return {56, 32};
+ case CudaArch::SM_70:
+ case CudaArch::SM_72:
+ case CudaArch::SM_75:
+ return {84, 32};
+ case CudaArch::GFX600:
+ case CudaArch::GFX601:
+ case CudaArch::GFX700:
+ case CudaArch::GFX701:
+ case CudaArch::GFX702:
+ case CudaArch::GFX703:
+ case CudaArch::GFX704:
+ case CudaArch::GFX801:
+ case CudaArch::GFX802:
+ case CudaArch::GFX803:
+ case CudaArch::GFX810:
+ case CudaArch::GFX900:
+ case CudaArch::GFX902:
+ case CudaArch::GFX904:
+ case CudaArch::GFX906:
+ case CudaArch::GFX909:
+ case CudaArch::UNKNOWN:
+ break;
+ case CudaArch::LAST:
+ llvm_unreachable("Unexpected Cuda arch.");
+ }
+ llvm_unreachable("Unexpected NVPTX target without ptx feature.");
+}
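
For a rough sense of scale (illustrative figures only): clear() below lays the statically allocated globalized records out as a [NumSMs][BlocksPerSM] array of the record union, so the buffer grows with both values; the record size here is a made-up number.

    // Hypothetical sizing for sm_70 (84 SMs, 32 blocks per SM) and a 1 KiB
    // globalized record per team.
    constexpr unsigned NumSMs = 84;
    constexpr unsigned BlocksPerSM = 32;
    constexpr unsigned RecordBytes = 1024;
    constexpr unsigned BufferBytes = NumSMs * BlocksPerSM * RecordBytes;
    // BufferBytes == 2752512, roughly 2.6 MiB of device global memory.
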
+
+void CGOpenMPRuntimeNVPTX::clear() {
+ if (!GlobalizedRecords.empty()) {
+ ASTContext &C = CGM.getContext();
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
+ llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ StaticRD->startDefinition();
+ RecordDecl *SharedStaticRD = C.buildImplicitRecord(
+ "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
+ SharedStaticRD->startDefinition();
+ for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
+ if (Records.Records.empty())
+ continue;
+ unsigned Size = 0;
+ unsigned RecAlignment = 0;
+ for (const RecordDecl *RD : Records.Records) {
+ QualType RDTy = C.getRecordType(RD);
+ unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
+ RecAlignment = std::max(RecAlignment, Alignment);
+ unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
+ Size =
+ llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
+ }
+ Size = llvm::alignTo(Size, RecAlignment);
+ llvm::APInt ArySize(/*numBits=*/64, Size);
+ QualType SubTy = C.getConstantArrayType(
+ C.CharTy, ArySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ const bool UseSharedMemory = Size <= SharedMemorySize;
+ auto *Field =
+ FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
+ SourceLocation(), SourceLocation(), nullptr, SubTy,
+ C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ if (UseSharedMemory) {
+ SharedStaticRD->addDecl(Field);
+ SharedRecs.push_back(&Records);
+ } else {
+ StaticRD->addDecl(Field);
+ GlobalRecs.push_back(&Records);
+ }
+ Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
+ Records.UseSharedMemory->setInitializer(
+ llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
+ }
+ // Allocate SharedMemorySize buffer for the shared memory.
+    // FIXME: nvlink does not handle weak linkage correctly (objects with
+    // different sizes are reported as erroneous).
+    // Restore this code as soon as nvlink is fixed.
+ if (!SharedStaticRD->field_empty()) {
+ llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
+ QualType SubTy = C.getConstantArrayType(
+ C.CharTy, ArySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ auto *Field = FieldDecl::Create(
+ C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
+ C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ SharedStaticRD->addDecl(Field);
+ }
+ SharedStaticRD->completeDefinition();
+ if (!SharedStaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(SharedStaticRD);
+ llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMStaticTy,
+ /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
+ llvm::Constant::getNullValue(LLVMStaticTy),
+ "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ C.getTargetAddressSpace(LangAS::cuda_shared));
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
+ StaticRD->completeDefinition();
+ if (!StaticRD->field_empty()) {
+ QualType StaticTy = C.getRecordType(StaticRD);
+ std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
+ llvm::APInt Size1(32, SMsBlockPerSM.second);
+ QualType Arr1Ty =
+ C.getConstantArrayType(StaticTy, Size1, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::APInt Size2(32, SMsBlockPerSM.first);
+ QualType Arr2Ty = C.getConstantArrayType(Arr1Ty, Size2, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), LLVMArr2Ty,
+ /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
+ llvm::Constant::getNullValue(LLVMArr2Ty),
+ "_openmp_static_glob_rd_$_");
+ auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GV, CGM.VoidPtrTy);
+ for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
+ Rec->Buffer->replaceAllUsesWith(Replacement);
+ Rec->Buffer->eraseFromParent();
+ }
+ }
+ }
+ CGOpenMPRuntime::clear();
+}
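
Conceptually, the implicit records that clear() builds amount to something like the following declarations; the member names and sizes are placeholders, and the "_$_" suffixes of the real record names are dropped. Only the shape follows the code above: one char-array member per globalized record, with records that fit into SharedMemorySize routed to the shared union.

    // Placeholder member names and sizes; one member per
    // target/teams/distribute region seen in the compilation unit.
    union _openmp_static_memory_type {        // records that need global memory
      char region0[2048];
      char region1[4096];
    };
    union _shared_openmp_static_memory_type { // records that fit in shared memory
      char region2[64];
    };
    // The global variant is instantiated as
    //   _openmp_static_memory_type buffer[NumSMs][BlocksPerSM];
    // while the shared variant is replaced by a single block-shared buffer of
    // SharedMemorySize bytes; each region's Buffer variable is rewritten to
    // point into the corresponding allocation.
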
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index f83e99f8a3b7..6091610c37e3 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -56,6 +56,11 @@ private:
ExecutionMode getExecutionMode() const;
+ bool requiresFullRuntime() const { return RequiresFullRuntime; }
+
+ /// Get barrier to synchronize all threads in a block.
+ void syncCTAThreads(CodeGenFunction &CGF);
+
/// Emit the worker function for the current target region.
void emitWorkerFunction(WorkerFunctionState &WST);
@@ -72,10 +77,11 @@ private:
void emitNonSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
/// Helper for generic variables globalization prolog.
- void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc);
+ void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc,
+ bool WithSPMDCheck = false);
/// Helper for generic variables globalization epilog.
- void emitGenericVarsEpilog(CodeGenFunction &CGF);
+ void emitGenericVarsEpilog(CodeGenFunction &CGF, bool WithSPMDCheck = false);
/// Helper for SPMD mode target directive's entry function.
void emitSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
@@ -179,8 +185,19 @@ protected:
return "__omp_outlined__";
}
+  /// Check if the default location must be constant.
+  /// It is constant for NVPTX to allow better optimization.
+ bool isDefaultLocationConstant() const override { return true; }
+
+  /// Returns additional flags that can be stored in the reserved_2 field of
+  /// the default location.
+  /// For the NVPTX target it encodes the SPMD/Non-SPMD execution mode and the
+  /// Full/Lightweight runtime mode, which is used for better optimization.
+ unsigned getDefaultLocationReserved2Flags() const override;
+
public:
explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
+ void clear() override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
@@ -260,6 +277,18 @@ public:
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
+ /// Emit an implicit/explicit barrier for OpenMP threads.
+ /// \param Kind Directive for which this implicit barrier call must be
+ /// generated. Must be OMPD_barrier for explicit barrier generation.
+  /// \param EmitChecks true if checks for cancellation barriers must be
+  /// emitted.
+  /// \param ForceSimpleCall true if a simple barrier call must be emitted,
+  /// false if the runtime class decides which one to emit (simple or with
+  /// cancellation checks).
+ ///
+ void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPDirectiveKind Kind, bool EmitChecks = true,
+ bool ForceSimpleCall = false) override;
+
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
@@ -339,6 +368,26 @@ public:
///
void functionFinished(CodeGenFunction &CGF) override;
+ /// Choose a default value for the dist_schedule clause.
+ void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
+ llvm::Value *&Chunk) const override;
+
+ /// Choose a default value for the schedule clause.
+ void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
+ const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
+ const Expr *&ChunkExpr) const override;
+
+ /// Adjust some parameters for the target-based directives, like addresses of
+ /// the variables captured by reference in lambdas.
+ void adjustTargetSpecificDataForLambdas(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) const override;
+
+  /// Perform a check on the requires decl to ensure that the target
+  /// architecture supports unified addressing.
+ void checkArchForUnifiedAddressing(CodeGenModule &CGM,
+ const OMPRequiresDecl *D) const override;
+
private:
/// Track the execution mode when codegening directives within a target
/// region. The appropriate mode (SPMD/NON-SPMD) is set on entry to the
@@ -346,9 +395,15 @@ private:
/// to emit optimized code.
ExecutionMode CurrentExecutionMode = EM_Unknown;
+ /// Check if the full runtime is required (default - yes).
+ bool RequiresFullRuntime = true;
+
/// true if we're emitting the code for the target region and next parallel
/// region is L0 for sure.
bool IsInTargetMasterThreadRegion = false;
+  /// true if currently emitting code for a target/teams/distribute region,
+  /// false otherwise.
+ bool IsInTTDRegion = false;
/// true if we're definitely in the parallel region.
bool IsInParallelRegion = false;
@@ -362,23 +417,59 @@ private:
llvm::Function *createParallelDataSharingWrapper(
llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D);
+  /// The data for a single globalized variable.
+ struct MappedVarData {
+ /// Corresponding field in the global record.
+ const FieldDecl *FD = nullptr;
+ /// Corresponding address.
+ Address PrivateAddr = Address::invalid();
+    /// true if only one element is required (for lastprivates in SPMD mode),
+    /// false if the buffer must be sized based on the warp size.
+ bool IsOnePerTeam = false;
+ MappedVarData() = delete;
+ MappedVarData(const FieldDecl *FD, bool IsOnePerTeam = false)
+ : FD(FD), IsOnePerTeam(IsOnePerTeam) {}
+ };
/// The map of local variables to their addresses in the global memory.
- using DeclToAddrMapTy = llvm::MapVector<const Decl *,
- std::pair<const FieldDecl *, Address>>;
+ using DeclToAddrMapTy = llvm::MapVector<const Decl *, MappedVarData>;
/// Set of the parameters passed by value escaping OpenMP context.
using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
struct FunctionData {
DeclToAddrMapTy LocalVarData;
+ llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
EscapedParamsTy EscapedParameters;
llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
const RecordDecl *GlobalRecord = nullptr;
+ llvm::Optional<const RecordDecl *> SecondaryGlobalRecord = llvm::None;
llvm::Value *GlobalRecordAddr = nullptr;
+ llvm::Value *IsInSPMDModeFlag = nullptr;
std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
};
/// Maps the function to the list of the globalized variables with their
/// addresses.
llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
+  /// List of records for the globalized variables in target/teams/distribute
+  /// contexts. Inner records are joined into a single record per context, and
+  /// the resulting records are then joined into a single union. This union
+  /// (one per CU) is the entry point for the static memory management runtime
+  /// functions.
+ struct GlobalPtrSizeRecsTy {
+ llvm::GlobalVariable *UseSharedMemory = nullptr;
+ llvm::GlobalVariable *RecSize = nullptr;
+ llvm::GlobalVariable *Buffer = nullptr;
+ SourceLocation Loc;
+ llvm::SmallVector<const RecordDecl *, 2> Records;
+ unsigned RegionCounter = 0;
+ };
+ llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
+  /// Shared pointer to the part of the global memory buffer used for the
+  /// given kernel.
+ llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
+ /// Pair of the Non-SPMD team and all reductions variables in this team
+ /// region.
+ std::pair<const Decl *, llvm::SmallVector<const ValueDecl *, 4>>
+ TeamAndReductions;
};
} // CodeGen namespace.
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 58aaae692552..c754541ac121 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 79662ec0099f..0242b48659d1 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -19,8 +19,6 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Sema/LoopHint.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
@@ -38,7 +36,7 @@ using namespace CodeGen;
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
SourceLocation Loc;
- Loc = S->getLocStart();
+ Loc = S->getBeginLoc();
DI->EmitLocation(Builder, Loc);
LastStopPoint = Loc;
@@ -932,6 +930,8 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
LexicalScope ForScope(*this, S.getSourceRange());
// Evaluate the first pieces before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
EmitStmt(S.getRangeStmt());
EmitStmt(S.getBeginStmt());
EmitStmt(S.getEndStmt());
@@ -1020,7 +1020,7 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
if (requiresReturnValueCheck()) {
- llvm::Constant *SLoc = EmitCheckSourceLocation(S.getLocStart());
+ llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
auto *SLocPtr =
new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
llvm::GlobalVariable::PrivateLinkage, SLoc);
@@ -1045,10 +1045,9 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// exception to our over-conservative rules about not jumping to
// statements following block literals with non-trivial cleanups.
RunCleanupsScope cleanupScope(*this);
- if (const ExprWithCleanups *cleanups =
- dyn_cast_or_null<ExprWithCleanups>(RV)) {
- enterFullExpression(cleanups);
- RV = cleanups->getSubExpr();
+ if (const FullExpr *fe = dyn_cast_or_null<FullExpr>(RV)) {
+ enterFullExpression(fe);
+ RV = fe->getSubExpr();
}
// FIXME: Clean this up by using an LValue for ReturnTemp,
@@ -1821,11 +1820,14 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
- llvm::APSInt Result;
+ if (Info.requiresImmediateConstant()) {
+ llvm::APSInt AsmConst = InputExpr->EvaluateKnownConstInt(getContext());
+ return llvm::ConstantInt::get(getLLVMContext(), AsmConst);
+ }
+
+ Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), Result);
- assert(!Info.requiresImmediateConstant() &&
- "Required-immediate inlineasm arg isn't constant?");
+ return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
}
if (Info.allowsRegister() || !Info.allowsMemory())
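
A tiny x86 example (illustrative) of the operand kind this hunk affects: an immediate-only constraint is now folded with EvaluateKnownConstInt up front instead of relying on EvaluateAsInt and an assertion.

    constexpr int kShift = 3;

    unsigned shiftLeft(unsigned v) {
      // "I" requires a compile-time immediate in 0..31, so the constant
      // expression must be evaluated before the asm operand can be emitted.
      asm("shll %1, %0" : "+r"(v) : "I"(kShift));
      return v;
    }
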
@@ -1848,7 +1850,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
SmallVector<llvm::Metadata *, 8> Locs;
// Add the location of the first line to the MDNode.
Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
+ CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
@@ -1979,6 +1981,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
diag::err_asm_invalid_type_in_input)
<< OutExpr->getType() << OutputConstraint;
}
+
+ // Update largest vector width for any vector types.
+ if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
} else {
ArgTypes.push_back(Dest.getAddress().getType());
Args.push_back(Dest.getPointer());
@@ -2000,6 +2007,10 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
+ // Update largest vector width for any vector types.
+ if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -2080,6 +2091,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
<< InputExpr->getType() << InputConstraint;
+ // Update largest vector width for any vector types.
+ if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
Constraints += InputConstraint;
@@ -2272,7 +2288,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
"CapturedStmtInfo should be set when generating the captured function");
const CapturedDecl *CD = S.getCapturedDecl();
const RecordDecl *RD = S.getCapturedRecordDecl();
- SourceLocation Loc = S.getLocStart();
+ SourceLocation Loc = S.getBeginLoc();
assert(CD->hasBody() && "missing CapturedDecl body");
// Build the argument list.
@@ -2293,9 +2309,8 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
F->addFnAttr(llvm::Attribute::NoUnwind);
// Generate the function.
- StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
- CD->getLocation(),
- CD->getBody()->getLocStart());
+ StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
+ CD->getBody()->getBeginLoc());
// Set the context parameter in CapturedStmtInfo.
Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
@@ -2305,8 +2320,9 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
Ctx.getTagDeclType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
- auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
- S.getLocStart()).getScalarVal();
+ auto *ExprArg =
+ EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
+ .getScalarVal();
auto VAT = FD->getCapturedVLAType();
VLASizeMap[VAT->getSizeExpr()] = ExprArg;
}
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index 0d343f84c71f..eb1304d89345 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -73,7 +73,7 @@ public:
assert(VD == VD->getCanonicalDecl() &&
"Canonical decl must be captured.");
DeclRefExpr DRE(
- const_cast<VarDecl *>(VD),
+ CGF.getContext(), const_cast<VarDecl *>(VD),
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
@@ -191,7 +191,7 @@ public:
auto *VD = C.getCapturedVar();
assert(VD == VD->getCanonicalDecl() &&
"Canonical decl must be captured.");
- DeclRefExpr DRE(const_cast<VarDecl *>(VD),
+ DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
isCapturedVar(CGF, VD) ||
(CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
@@ -222,7 +222,7 @@ LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
LambdaCaptureFields.lookup(OrigVD) ||
(CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
(CurCodeDecl && isa<BlockDecl>(CurCodeDecl));
- DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), IsCaptured,
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
return EmitLValue(&DRE);
}
@@ -385,12 +385,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
FunctionDecl *DebugFunctionDecl = nullptr;
if (!FO.UIntPtrCastRequired) {
FunctionProtoType::ExtProtoInfo EPI;
+ QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
DebugFunctionDecl = FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), FO.S->getLocStart(),
- SourceLocation(), DeclarationName(), Ctx.VoidTy,
- Ctx.getTrivialTypeSourceInfo(
- Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI)),
- SC_Static, /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
+ Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
+ SourceLocation(), DeclarationName(), FunctionTy,
+ Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
+ /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
}
for (const FieldDecl *FD : RD->fields()) {
QualType ArgType = FD->getType();
@@ -422,7 +422,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
Arg = ParmVarDecl::Create(
Ctx, DebugFunctionDecl,
- CapVar ? CapVar->getLocStart() : FD->getLocStart(),
+ CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
/*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
} else {
@@ -459,7 +459,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
- FO.S->getLocStart(), CD->getBody()->getLocStart());
+ FO.S->getBeginLoc(), CD->getBody()->getBeginLoc());
unsigned Cnt = CD->getContextParamPosition();
I = FO.S->captures().begin();
for (const FieldDecl *FD : RD->fields()) {
@@ -602,7 +602,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
I->second.second,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
- CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getLocStart());
+ CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
if (EI != VLASizes.end()) {
@@ -611,12 +611,12 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
Arg->getType(),
AlignmentSource::Decl);
- CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getLocStart());
+ CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
}
}
CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
}
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, S.getBeginLoc(),
F, CallArgs);
WrapperCGF.FinishFunction();
return WrapperF;
@@ -763,7 +763,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
const auto *VDInit =
cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
bool IsRegistered;
- DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
LValue OriginalLVal = EmitLValue(&DRE);
@@ -878,8 +878,8 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
getContext().getTargetInfo().isTLSSupported()) {
assert(CapturedStmtInfo->lookup(VD) &&
"Copyin threadprivates should have been captured!");
- DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
- VK_LValue, (*IRef)->getExprLoc());
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
MasterAddr = EmitLValue(&DRE).getAddress();
LocalDeclMap.erase(VD);
} else {
@@ -953,11 +953,10 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
const auto *DestVD =
cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
- DeclRefExpr DRE(
- const_cast<VarDecl *>(OrigVD),
- /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
- OrigVD) != nullptr,
- (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
return EmitLValue(&DRE).getAddress();
});
// Check if the variable is also a firstprivate: in this case IInit is
@@ -1183,7 +1182,7 @@ void CodeGenFunction::EmitOMPReductionClauseFinal(
// Emit nowait reduction if nowait clause is present or directive is a
// parallel directive (it always has implicit barrier).
CGM.getOpenMPRuntime().emitReduction(
- *this, D.getLocEnd(), Privates, LHSExprs, RHSExprs, ReductionOps,
+ *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
{WithNowait, SimpleReduction, ReductionKind});
}
}
@@ -1237,12 +1236,12 @@ static void emitCommonOMPParallelDirective(
CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
/*IgnoreResultAssign=*/true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
- CGF, NumThreads, NumThreadsClause->getLocStart());
+ CGF, NumThreads, NumThreadsClause->getBeginLoc());
}
if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
CGF.CGM.getOpenMPRuntime().emitProcBindClause(
- CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
+ CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
}
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
@@ -1261,7 +1260,7 @@ static void emitCommonOMPParallelDirective(
// parameters when necessary
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
- CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
+ CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
CapturedVars, IfCond);
}
@@ -1281,7 +1280,7 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
// propagation master's thread values of threadprivate variables to local
// instances of that variables of all other implicit threads.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(
- CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
}
CGF.EmitOMPPrivateClause(S, PrivateScope);
@@ -1384,7 +1383,7 @@ bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
- DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
VD->getInit()->getType(), VK_LValue,
VD->getInit()->getExprLoc());
@@ -1429,7 +1428,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
}
}
const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
- DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
Address OrigAddr = EmitLValue(&DRE).getAddress();
@@ -1473,7 +1472,8 @@ static void emitAlignedClause(CodeGenFunction &CGF,
"alignment is not power of 2");
if (Alignment != 0) {
llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
- CGF.EmitAlignmentAssumption(PtrValue, Alignment);
+ CGF.EmitAlignmentAssumption(
+ PtrValue, E, /*No second loc needed*/ SourceLocation(), Alignment);
}
}
}
@@ -1497,7 +1497,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
VD->hasGlobalStorage()) {
(void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
- DeclRefExpr DRE(const_cast<VarDecl *>(VD),
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
E->getType(), VK_LValue, E->getExprLoc());
return EmitLValue(&DRE).getAddress();
@@ -1509,6 +1509,23 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
}
++I;
}
+ // Privatize extra loop counters used in loops for ordered(n) clauses.
+ for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
+ if (!C->getNumForLoops())
+ continue;
+ for (unsigned I = S.getCollapsedNumber(),
+ E = C->getLoopNumIterations().size();
+ I < E; ++I) {
+ const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
+ const auto *VD = cast<VarDecl>(DRE->getDecl());
+ // Override only those variables that are really emitted already.
+ if (LocalDeclMap.count(VD)) {
+ (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
+ return CreateMemTemp(DRE->getType(), VD->getName());
+ });
+ }
+ }
+ }
}
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
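
For reference, a user-level doacross loop (illustrative) of the kind that needs the extra loop-counter privatization added above: ordered(2) names two loop levels, and the depend clauses express the cross-iteration ordering.

    void wavefront(int n, int m, float (*A)[1024]) {
    #pragma omp for ordered(2)
      for (int i = 1; i < n; ++i)
        for (int j = 1; j < m; ++j) {
    #pragma omp ordered depend(sink: i - 1, j) depend(sink: i, j - 1)
          A[i][j] = 0.5f * (A[i - 1][j] + A[i][j - 1]);
    #pragma omp ordered depend(source)
        }
    }
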
@@ -1627,7 +1644,7 @@ void CodeGenFunction::EmitOMPSimdFinal(
if (CED) {
OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
} else {
- DeclRefExpr DRE(const_cast<VarDecl *>(PrivateVD),
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
/*RefersToEnclosingVariableOrCapture=*/false,
(*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
OrigAddr = EmitLValue(&DRE).getAddress();
@@ -1721,6 +1738,8 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPReductionClauseInit(S, LoopScope);
bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
S.getInc(),
[&S](CodeGenFunction &CGF) {
@@ -1785,7 +1804,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
} else {
BoolCondVal =
- RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, LoopArgs.IL,
+ RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
}
@@ -1819,7 +1838,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
else
EmitOMPSimdInit(S, IsMonotonic);
- SourceLocation Loc = S.getLocStart();
+ SourceLocation Loc = S.getBeginLoc();
// when 'distribute' is not combined with a 'for':
// while (idx <= UB) { BODY; ++idx; }
@@ -1851,7 +1870,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
// Tell the runtime we are done.
auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
if (!DynamicOrOrdered)
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(),
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
S.getDirectiveKind());
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
@@ -1934,13 +1953,13 @@ void CodeGenFunction::EmitOMPForOuterLoop(
llvm::Value *UBVal = DispatchBounds.second;
CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
LoopArgs.Chunk};
- RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
+ RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
IVSigned, Ordered, DipatchRTInputValues);
} else {
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
LoopArgs.ST, LoopArgs.Chunk);
- RT.emitForStaticInit(*this, S.getLocStart(), S.getDirectiveKind(),
+ RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
ScheduleKind, StaticInit);
}
@@ -1986,10 +2005,10 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
- RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, StaticInit);
+ RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
// for combined 'distribute' and 'for' the increment expression of distribute
- // is store in DistInc. For 'distribute' alone, it is in Inc.
+ // is stored in DistInc. For 'distribute' alone, it is in Inc.
Expr *IncExpr;
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
IncExpr = S.getDistInc();
@@ -2082,9 +2101,9 @@ emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
// distribute chunk
QualType IteratorTy = IVExpr->getType();
llvm::Value *LBVal =
- CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getLocStart());
+ CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
llvm::Value *UBVal =
- CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getLocStart());
+ CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
return {LBVal, UBVal};
}
@@ -2244,7 +2263,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
bool Ordered = false;
if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
if (OrderedClause->getNumForLoops())
- RT.emitDoacrossInit(*this, S);
+ RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
else
Ordered = true;
}
@@ -2270,7 +2289,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// initialization of firstprivate variables and post-update of
// lastprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(
- *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
}
EmitOMPPrivateClause(S, LoopScope);
@@ -2279,19 +2298,33 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
EmitOMPPrivateLoopCounters(S, LoopScope);
EmitOMPLinearClause(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
// Detect the loop schedule kind and chunk.
- llvm::Value *Chunk = nullptr;
+ const Expr *ChunkExpr = nullptr;
OpenMPScheduleTy ScheduleKind;
if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
ScheduleKind.Schedule = C->getScheduleKind();
ScheduleKind.M1 = C->getFirstScheduleModifier();
ScheduleKind.M2 = C->getSecondScheduleModifier();
- if (const Expr *Ch = C->getChunkSize()) {
- Chunk = EmitScalarExpr(Ch);
- Chunk = EmitScalarConversion(Chunk, Ch->getType(),
- S.getIterationVariable()->getType(),
- S.getLocStart());
+ ChunkExpr = C->getChunkSize();
+ } else {
+ // Default behaviour for schedule clause.
+ CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
+ *this, S, ScheduleKind.Schedule, ChunkExpr);
+ }
+ bool HasChunkSizeOne = false;
+ llvm::Value *Chunk = nullptr;
+ if (ChunkExpr) {
+ Chunk = EmitScalarExpr(ChunkExpr);
+ Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
+ S.getIterationVariable()->getType(),
+ S.getBeginLoc());
+ Expr::EvalResult Result;
+ if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
+ llvm::APSInt EvaluatedChunk = Result.Val.getInt();
+ HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
}
}
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
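A minimal sketch of the updated constant-evaluation idiom used above: Expr::EvaluateAsInt now fills an Expr::EvalResult, and the APSInt is read back from the APValue it carries. The helper name below is hypothetical; only the API calls mirror the patch.

// Returns true if the chunk-size expression folds to the constant 1,
// e.g. schedule(static, 1).
static bool isChunkSizeOne(const clang::Expr *Ch, const clang::ASTContext &Ctx) {
  clang::Expr::EvalResult Result;
  if (!Ch->EvaluateAsInt(Result, Ctx))
    return false;                      // not a compile-time integer constant
  return Result.Val.getInt().getLimitedValue() == 1;
}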
@@ -2300,8 +2333,12 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// If the static schedule kind is specified or if the ordered clause is
// specified, and if no monotonic modifier is specified, the effect will
// be as if the monotonic modifier was specified.
- if (RT.isStaticNonchunked(ScheduleKind.Schedule,
- /* Chunked */ Chunk != nullptr) &&
+ bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
+ /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
+ isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
+ if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
+ /* Chunked */ Chunk != nullptr) ||
+ StaticChunkedOne) &&
!Ordered) {
if (isOpenMPSimdDirective(S.getDirectiveKind()))
EmitOMPSimdInit(S, /*IsMonotonic=*/true);
@@ -2312,27 +2349,42 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// unspecified in this case.
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
- UB.getAddress(), ST.getAddress());
- RT.emitForStaticInit(*this, S.getLocStart(), S.getDirectiveKind(),
+ UB.getAddress(), ST.getAddress(),
+ StaticChunkedOne ? Chunk : nullptr);
+ RT.emitForStaticInit(*this, S.getBeginLoc(), S.getDirectiveKind(),
ScheduleKind, StaticInit);
JumpDest LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
- EmitIgnoredExpr(S.getEnsureUpperBound());
+ if (!StaticChunkedOne)
+ EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB;
EmitIgnoredExpr(S.getInit());
- // while (idx <= UB) { BODY; ++idx; }
- EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
- S.getInc(),
- [&S, LoopExit](CodeGenFunction &CGF) {
- CGF.EmitOMPLoopBody(S, LoopExit);
- CGF.EmitStopPoint(&S);
- },
- [](CodeGenFunction &) {});
+ // For unchunked static schedule generate:
+ //
+ // while (idx <= UB) {
+ // BODY;
+ // ++idx;
+ // }
+ //
+ // For static schedule with chunk one:
+ //
+ // while (IV <= PrevUB) {
+ // BODY;
+ // IV += ST;
+ // }
+ EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
+ StaticChunkedOne ? S.getCombinedParForInDistCond() : S.getCond(),
+ StaticChunkedOne ? S.getDistInc() : S.getInc(),
+ [&S, LoopExit](CodeGenFunction &CGF) {
+ CGF.EmitOMPLoopBody(S, LoopExit);
+ CGF.EmitStopPoint(&S);
+ },
+ [](CodeGenFunction &) {});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(),
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
S.getDirectiveKind());
};
OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
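For reference, the kind of combined construct that takes the StaticChunkedOne path introduced above: with a constant chunk of 1 on a loop-bound-sharing directive, the inner worksharing loop walks the distribute chunk directly using the distribute increment. The pragma and body below are illustrative only.

// schedule(static, 1) on a combined 'distribute parallel for' directive:
// the generated inner loop is 'while (IV <= PrevUB) { body; IV += ST; }'.
void saxpy(int n, float a, const float *x, float *y) {
#pragma omp target teams distribute parallel for schedule(static, 1) \
    map(to : x[0 : n]) map(tofrom : y[0 : n])
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}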
@@ -2351,11 +2403,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
LoopArguments, CGDispatchBounds);
}
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- EmitOMPSimdFinal(S,
- [IL, &S](CodeGenFunction &CGF) {
- return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
- });
+ EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
+ return CGF.Builder.CreateIsNotNull(
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
+ });
}
EmitOMPReductionClauseFinal(
S, /*ReductionKind=*/isOpenMPSimdDirective(S.getDirectiveKind())
@@ -2365,17 +2416,17 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
emitPostUpdateForReductionClause(
*this, S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
});
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivateClause)
EmitOMPLastprivateClauseFinal(
S, isOpenMPSimdDirective(S.getDirectiveKind()),
- Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
+ Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
}
EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
});
DoacrossCleanupScope.ForceCleanup();
// We're now done with the loop, so jump to the continuation block.
@@ -2432,7 +2483,7 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
@@ -2450,7 +2501,7 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
@@ -2485,16 +2536,16 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.Builder.getInt32(0));
// Loop counter.
LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
- OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
+ OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
- OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
+ OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
- OK_Ordinary, S.getLocStart(), FPOptions());
+ OK_Ordinary, S.getBeginLoc(), FPOptions());
// Increment for loop counter.
UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
- S.getLocStart(), true);
+ S.getBeginLoc(), true);
auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
@@ -2509,7 +2560,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// .omp.sections.exit:
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
llvm::SwitchInst *SwitchStmt =
- CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getLocStart()),
+ CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
ExitBB, CS == nullptr ? 1 : CS->size());
if (CS) {
unsigned CaseNumber = 0;
@@ -2537,13 +2588,15 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// initialization of firstprivate variables and post-update of lastprivate
// variables.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(
- CGF, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
}
CGF.EmitOMPPrivateClause(S, LoopScope);
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
// Emit static non-chunked loop.
OpenMPScheduleTy ScheduleKind;
@@ -2552,20 +2605,20 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
/*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
LB.getAddress(), UB.getAddress(), ST.getAddress());
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
- CGF, S.getLocStart(), S.getDirectiveKind(), ScheduleKind, StaticInit);
+ CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
// UB = min(UB, GlobalUB);
- llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
+ llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB;
- CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
+ CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
// while (idx <= UB) { BODY; ++idx; }
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd(),
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
S.getDirectiveKind());
};
CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
@@ -2573,7 +2626,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
// Emit post-update of the reduction variables if IsLastIter != 0.
emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
});
// Emit final copy of the lastprivate variables if IsLastIter != 0.
@@ -2581,7 +2634,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.EmitOMPLastprivateClauseFinal(
S, /*NoFinals=*/false,
CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart())));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
};
bool HasCancel = false;
@@ -2598,7 +2651,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
OMPD_unknown);
}
}
@@ -2610,7 +2663,7 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
}
// Emit an implicit barrier at the end.
if (!S.getSingleClause<OMPNowaitClause>()) {
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
OMPD_sections);
}
}
@@ -2652,7 +2705,7 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
};
{
OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
CopyprivateVars, DestExprs,
SrcExprs, AssignmentOps);
}
@@ -2660,7 +2713,7 @@ void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
// init or if no 'nowait' clause was specified and no 'copyprivate' clause).
if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
CGM.getOpenMPRuntime().emitBarrierCall(
- *this, S.getLocStart(),
+ *this, S.getBeginLoc(),
S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
}
}
@@ -2671,7 +2724,7 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
+ CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
@@ -2685,7 +2738,7 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitCriticalRegion(*this,
S.getDirectiveName().getAsString(),
- CodeGen, S.getLocStart(), Hint);
+ CodeGen, S.getBeginLoc(), Hint);
}
void CodeGenFunction::EmitOMPParallelForDirective(
@@ -2828,7 +2881,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
}
Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
- *this, S.getLocStart(), LHSs, RHSs, Data);
+ *this, S.getBeginLoc(), LHSs, RHSs, Data);
// Build list of dependences.
for (const auto *C : S.getClausesOfKind<OMPDependClause>())
for (const Expr *IRef : C->varlists())
@@ -2872,15 +2925,15 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
+ CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
CopyFn, CallArgs);
for (const auto &Pair : LastprivateDstsOrigs) {
const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
- DeclRefExpr DRE(
- const_cast<VarDecl *>(OrigVD),
- /*RefersToEnclosingVariableOrCapture=*/CGF.CapturedStmtInfo->lookup(
- OrigVD) != nullptr,
- Pair.second->getType(), VK_LValue, Pair.second->getExprLoc());
+ DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/
+ CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ Pair.second->getType(), VK_LValue,
+ Pair.second->getExprLoc());
Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
return CGF.EmitLValue(&DRE).getAddress();
});
@@ -2902,11 +2955,11 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG.emitAggregateType(CGF, Cnt);
      // FIXME: This must be removed once the runtime library is fixed.
// Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
- CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
- CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement =
Address(CGF.EmitScalarConversion(
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
@@ -2948,17 +3001,17 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
RedCG.emitSharedLValue(CGF, Cnt);
RedCG.emitAggregateType(CGF, Cnt);
// The taskgroup descriptor variable is always implicit firstprivate and
- // privatized already during procoessing of the firstprivates.
+ // privatized already during processing of the firstprivates.
      // FIXME: This must be removed once the runtime library is fixed.
// Emit required threadprivate variables for
- // initilizer/combiner/finalizer.
- CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getLocStart(),
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
RedCG, Cnt);
llvm::Value *ReductionsPtr =
CGF.EmitLoadOfScalar(CGF.EmitLValue(TaskgroupDescriptors[Cnt]),
TaskgroupDescriptors[Cnt]->getExprLoc());
Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
- CGF, S.getLocStart(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
Replacement = Address(
CGF.EmitScalarConversion(
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
@@ -3049,14 +3102,14 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
getContext().VoidPtrTy, ArrSize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
BPVD = createImplicitFirstprivateForType(
- getContext(), Data, BaseAndPointersType, CD, S.getLocStart());
+ getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
PVD = createImplicitFirstprivateForType(
- getContext(), Data, BaseAndPointersType, CD, S.getLocStart());
+ getContext(), Data, BaseAndPointersType, CD, S.getBeginLoc());
QualType SizesType = getContext().getConstantArrayType(
getContext().getSizeType(), ArrSize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
- S.getLocStart());
+ S.getBeginLoc());
TargetScope.addPrivate(
BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
TargetScope.addPrivate(PVD,
@@ -3091,7 +3144,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
}
- CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
+ CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
CopyFn, CallArgs);
for (const auto &Pair : PrivatePtrs) {
Address Replacement(CGF.Builder.CreateLoad(Pair.second),
@@ -3122,7 +3175,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
SourceLocation());
- CGM.getOpenMPRuntime().emitTaskCall(*this, S.getLocStart(), S, OutlinedFn,
+ CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
SharedsTy, CapturedStruct, &IfCond, Data);
}
@@ -3149,7 +3202,7 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
IfCond](CodeGenFunction &CGF, llvm::Value *OutlinedFn,
const OMPTaskDataTy &Data) {
- CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getLocStart(), S, OutlinedFn,
+ CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
SharedsTy, CapturedStruct, IfCond,
Data);
};
@@ -3158,15 +3211,15 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
void CodeGenFunction::EmitOMPTaskyieldDirective(
const OMPTaskyieldDirective &S) {
- CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
+ CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}
void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
- CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
+ CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
}
void CodeGenFunction::EmitOMPTaskgroupDirective(
@@ -3195,7 +3248,7 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
}
}
llvm::Value *ReductionDesc =
- CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getLocStart(),
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
LHSs, RHSs, Data);
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
CGF.EmitVarDecl(*VD);
@@ -3205,7 +3258,7 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
+ CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
@@ -3217,7 +3270,7 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
FlushClause->varlist_end());
return llvm::None;
}(),
- S.getLocStart());
+ S.getBeginLoc());
}
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
@@ -3286,7 +3339,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// on initialization of firstprivate variables and post-update of
// lastprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(
- *this, S.getLocStart(), OMPD_unknown, /*EmitChecks=*/false,
+ *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
/*ForceSimpleCall=*/true);
}
EmitOMPPrivateClause(S, LoopScope);
@@ -3297,6 +3350,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
EmitOMPPrivateLoopCounters(S, LoopScope);
(void)LoopScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
// Detect the distribute schedule kind and chunk.
llvm::Value *Chunk = nullptr;
@@ -3307,8 +3362,12 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
Chunk = EmitScalarExpr(Ch);
Chunk = EmitScalarConversion(Chunk, Ch->getType(),
S.getIterationVariable()->getType(),
- S.getLocStart());
+ S.getBeginLoc());
}
+ } else {
+ // Default behaviour for dist_schedule clause.
+ CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
+ *this, S, ScheduleKind, Chunk);
}
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
@@ -3321,14 +3380,19 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// iteration space is divided into chunks that are approximately equal
// in size, and at most one chunk is distributed to each team of the
// league. The size of the chunks is unspecified in this case.
+ bool StaticChunked = RT.isStaticChunked(
+ ScheduleKind, /* Chunked */ Chunk != nullptr) &&
+ isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
if (RT.isStaticNonchunked(ScheduleKind,
- /* Chunked */ Chunk != nullptr)) {
+ /* Chunked */ Chunk != nullptr) ||
+ StaticChunked) {
if (isOpenMPSimdDirective(S.getDirectiveKind()))
EmitOMPSimdInit(S, /*IsMonotonic=*/true);
CGOpenMPRuntime::StaticRTInput StaticInit(
IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
- LB.getAddress(), UB.getAddress(), ST.getAddress());
- RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
+ StaticChunked ? Chunk : nullptr);
+ RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
StaticInit);
JumpDest LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
@@ -3346,18 +3410,48 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
? S.getCombinedCond()
: S.getCond();
- // for distribute alone, codegen
- // while (idx <= UB) { BODY; ++idx; }
- // when combined with 'for' (e.g. as in 'distribute parallel for')
- // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
+ if (StaticChunked)
+ Cond = S.getCombinedDistCond();
+
+ // For static unchunked schedules generate:
+ //
+ // 1. For distribute alone, codegen
+ // while (idx <= UB) {
+ // BODY;
+ // ++idx;
+ // }
+ //
+ // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
+ // while (idx <= UB) {
+ // <CodeGen rest of pragma>(LB, UB);
+ // idx += ST;
+ // }
+ //
+ // For static chunk one schedule generate:
+ //
+ // while (IV <= GlobalUB) {
+ // <CodeGen rest of pragma>(LB, UB);
+ // LB += ST;
+ // UB += ST;
+ // UB = min(UB, GlobalUB);
+ // IV = LB;
+ // }
+ //
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
[&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
CodeGenLoop(CGF, S, LoopExit);
},
- [](CodeGenFunction &) {});
+ [&S, StaticChunked](CodeGenFunction &CGF) {
+ if (StaticChunked) {
+ CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
+ CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
+ CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
+ CGF.EmitIgnoredExpr(S.getCombinedInit());
+ }
+ });
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- RT.emitForStaticFinish(*this, S.getLocStart(), S.getDirectiveKind());
+ RT.emitForStaticFinish(*this, S.getBeginLoc(), S.getDirectiveKind());
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
@@ -3370,38 +3464,25 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
});
}
if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
!isOpenMPParallelDirective(S.getDirectiveKind()) &&
!isOpenMPTeamsDirective(S.getDirectiveKind())) {
- OpenMPDirectiveKind ReductionKind = OMPD_unknown;
- if (isOpenMPParallelDirective(S.getDirectiveKind()) &&
- isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for_simd;
- } else if (isOpenMPParallelDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_parallel_for;
- } else if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- ReductionKind = OMPD_simd;
- } else if (!isOpenMPTeamsDirective(S.getDirectiveKind()) &&
- S.hasClausesOfKind<OMPReductionClause>()) {
- llvm_unreachable(
- "No reduction clauses is allowed in distribute directive.");
- }
- EmitOMPReductionClauseFinal(S, ReductionKind);
+ EmitOMPReductionClauseFinal(S, OMPD_simd);
// Emit post-update of the reduction variables if IsLastIter != 0.
emitPostUpdateForReductionClause(
*this, S, [IL, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getLocStart()));
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
});
}
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivateClause) {
EmitOMPLastprivateClauseFinal(
S, /*NoFinals=*/false,
- Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
+ Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
}
}
@@ -3448,7 +3529,7 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
llvm::Function *OutlinedFn = emitOutlinedOrderedFunction(CGM, CS);
- CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
OutlinedFn, CapturedVars);
} else {
Action.Enter(CGF);
@@ -3456,7 +3537,7 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
}
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
- CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart(), !C);
+ CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
}
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
@@ -3887,6 +3968,11 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
@@ -3903,13 +3989,13 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
const Stmt *CS = S.getInnermostCapturedStmt()->IgnoreContainers();
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS))
- enterFullExpression(EWC);
+ if (const auto *FE = dyn_cast<FullExpr>(CS))
+ enterFullExpression(FE);
// Processing for statements under 'atomic capture'.
if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
for (const Stmt *C : Compound->body()) {
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(C))
- enterFullExpression(EWC);
+ if (const auto *FE = dyn_cast<FullExpr>(C))
+ enterFullExpression(FE);
}
}
@@ -3918,7 +4004,7 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
CGF.EmitStopPoint(CS);
emitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
S.getV(), S.getExpr(), S.getUpdateExpr(),
- S.isXLHSInRHSPart(), S.getLocStart());
+ S.isXLHSInRHSPart(), S.getBeginLoc());
};
OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
@@ -3986,6 +4072,16 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
IsOffloadEntry, CodeGen);
OMPLexicalScope Scope(CGF, S, OMPD_task);
+ auto &&SizeEmitter = [](CodeGenFunction &CGF, const OMPLoopDirective &D) {
+ OMPLoopScope(CGF, D);
+ // Emit calculation of the iterations count.
+ llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
+ NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
+ /*IsSigned=*/false);
+ return NumIterations;
+ };
+ CGM.getOpenMPRuntime().emitTargetNumIterationsCall(CGF, S, Device,
+ SizeEmitter);
CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device);
}
@@ -3996,6 +4092,8 @@ static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
CGF.EmitOMPPrivateClause(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
}
@@ -4037,13 +4135,13 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
const Expr *ThreadLimit = TL ? TL->getThreadLimit() : nullptr;
CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
- S.getLocStart());
+ S.getBeginLoc());
}
OMPTeamsScope Scope(CGF, S);
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
- CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getLocStart(), OutlinedFn,
+ CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
CapturedVars);
}
@@ -4076,6 +4174,8 @@ static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
};
@@ -4394,7 +4494,7 @@ void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
void CodeGenFunction::EmitOMPCancellationPointDirective(
const OMPCancellationPointDirective &S) {
- CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
+ CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
S.getCancelRegion());
}
@@ -4407,7 +4507,7 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
break;
}
}
- CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(), IfCond,
+ CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
S.getCancelRegion());
}
@@ -4634,6 +4734,8 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF,
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
+ if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
+ CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
// TODO: Add support for clauses.
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
@@ -4864,7 +4966,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
S, isOpenMPSimdDirective(S.getDirectiveKind()),
CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
- (*LIP)->getType(), S.getLocStart())));
+ (*LIP)->getType(), S.getBeginLoc())));
}
};
auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
@@ -4873,7 +4975,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
&Data](CodeGenFunction &CGF, PrePostActionTy &) {
OMPLoopScope PreInitScope(CGF, S);
- CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getLocStart(), S,
+ CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
OutlinedFn, SharedsTy,
CapturedStruct, IfCond, Data);
};
@@ -4891,7 +4993,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
Data);
},
- S.getLocStart());
+ S.getBeginLoc());
}
}
@@ -4934,16 +5036,37 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
if (isOpenMPSimdDirective(D.getDirectiveKind())) {
emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
} else {
+ OMPPrivateScope LoopGlobals(CGF);
if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
for (const Expr *E : LD->counters()) {
- if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
- cast<DeclRefExpr>(E)->getDecl())) {
+ const auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
+ LValue GlobLVal = CGF.EmitLValue(E);
+ LoopGlobals.addPrivate(
+ VD, [&GlobLVal]() { return GlobLVal.getAddress(); });
+ }
+ if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
if (!CGF.LocalDeclMap.count(VD))
CGF.EmitVarDecl(*VD);
}
}
+ for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
+ if (!C->getNumForLoops())
+ continue;
+ for (unsigned I = LD->getCollapsedNumber(),
+ E = C->getLoopNumIterations().size();
+ I < E; ++I) {
+ if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
+ cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
+ // Emit only those that were not explicitly referenced in clauses.
+ if (!CGF.LocalDeclMap.count(VD))
+ CGF.EmitVarDecl(*VD);
+ }
+ }
+ }
}
+ LoopGlobals.Privatize();
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}
};
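The LoopGlobals scope added above remaps loop counters that are not function-local, so the captured body sees a private copy. A sketch of the kind of counter involved (whether a particular directive is emitted through this simplified path depends on the surrounding context; the code is illustrative only):

// A loop variable with static storage duration: VD->hasLocalStorage() is
// false and it is not in LocalDeclMap, so it is privatized explicitly.
int gi;                          // global used as the OpenMP loop variable
void scale(int n, float *a, float f) {
#pragma omp parallel for
  for (gi = 0; gi < n; ++gi)
    a[gi] *= f;
}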
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index b0a3a0bffa2e..fbd8146702a9 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -119,10 +119,10 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+ unsigned Align = CGM.getDataLayout().getABITypeAlignment(CGM.Int8PtrTy);
- llvm::GlobalVariable *GV =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
- llvm::GlobalValue::ExternalLinkage);
+ llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
+ Name, ArrayType, llvm::GlobalValue::ExternalLinkage, Align);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
return GV;
}
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index cc334637a831..bfb089ff908e 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -16,9 +16,9 @@
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
@@ -128,7 +128,7 @@ static void resolveTopLevelMetadata(llvm::Function *Fn,
// they are referencing.
for (auto &BB : Fn->getBasicBlockList()) {
for (auto &I : BB) {
- if (auto *DII = dyn_cast<llvm::DbgInfoIntrinsic>(&I)) {
+ if (auto *DII = dyn_cast<llvm::DbgVariableIntrinsic>(&I)) {
auto *DILocal = DII->getVariable();
if (!DILocal->isResolved())
DILocal->resolve();
@@ -231,7 +231,7 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
// Build FunctionArgs.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- QualType ThisType = MD->getThisType(getContext());
+ QualType ThisType = MD->getThisType();
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
QualType ResultType;
if (IsUnprototyped)
@@ -304,13 +304,13 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
CGM.ErrorUnsupported(
MD, "non-trivial argument copy for return-adjusting thunk");
}
- EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
+ EmitMustTailThunk(CurGD, AdjustedThisPtr, CalleePtr);
return;
}
// Start building CallArgs.
CallArgList CallArgs;
- QualType ThisType = MD->getThisType(getContext());
+ QualType ThisType = MD->getThisType();
CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
if (isa<CXXDestructorDecl>(MD))
@@ -350,13 +350,12 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
: FPT->getReturnType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
- CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect)
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
// Now emit our call.
llvm::Instruction *CallOrInvoke;
- CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CurGD);
RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);
// Consider return adjustment if we have ThunkInfo.
@@ -375,7 +374,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
FinishThunk();
}
-void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
+void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
llvm::Value *AdjustedThisPtr,
llvm::Value *CalleePtr) {
// Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
@@ -412,7 +411,7 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
// Apply the standard set of call attributes.
unsigned CallingConv;
llvm::AttributeList Attrs;
- CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, MD, Attrs,
+ CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, GD, Attrs,
CallingConv, /*AttrOnCallSite=*/true);
Call->setAttributes(Attrs);
Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
@@ -756,9 +755,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
Linkage = llvm::GlobalVariable::InternalLinkage;
+ unsigned Align = CGM.getDataLayout().getABITypeAlignment(VTType);
+
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage);
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage, Align);
CGM.setGVProperties(VTable, RD);
// V-tables are always unnamed_addr.
@@ -1020,8 +1021,8 @@ void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
AP.second.AddressPointIndex));
// Sort the address points for determinism.
- llvm::sort(AddressPoints.begin(), AddressPoints.end(),
- [this](const AddressPoint &AP1, const AddressPoint &AP2) {
+ llvm::sort(AddressPoints, [this](const AddressPoint &AP1,
+ const AddressPoint &AP2) {
if (&AP1 == &AP2)
return false;
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 0dcbea423ad7..da8a8efb840b 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -562,7 +562,10 @@ public:
}
void setVolatile(bool flag) {
- Quals.setVolatile(flag);
+ if (flag)
+ Quals.addVolatile();
+ else
+ Quals.removeVolatile();
}
Qualifiers::ObjCLifetime getObjCLifetime() const {
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 2a0f4f0e83ec..29c6793c601e 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -102,4 +102,5 @@ add_clang_library(clangCodeGen
clangBasic
clangFrontend
clangLex
+ clangSerialization
)
diff --git a/lib/CodeGen/CodeGenABITypes.cpp b/lib/CodeGen/CodeGenABITypes.cpp
index c152291b15b9..27f5d53ffe11 100644
--- a/lib/CodeGen/CodeGenABITypes.cpp
+++ b/lib/CodeGen/CodeGenABITypes.cpp
@@ -20,7 +20,6 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index d499364002f0..fd4506f2d197 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -127,6 +127,7 @@ namespace clang {
CodeGenOpts, C, CoverageInfo)),
LinkModules(std::move(LinkModules)) {
FrontendTimesIsEnabled = TimePasses;
+ llvm::TimePassesIsEnabled = TimePasses;
}
llvm::Module *getModule() const { return Gen->GetModule(); }
std::unique_ptr<llvm::Module> takeModule() {
@@ -548,12 +549,16 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
SourceLocation DILoc;
if (D.isLocationAvailable()) {
- D.getLocation(&Filename, &Line, &Column);
- const FileEntry *FE = FileMgr.getFile(Filename);
- if (FE && Line > 0) {
- // If -gcolumn-info was not used, Column will be 0. This upsets the
- // source manager, so pass 1 if Column is not set.
- DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
+ D.getLocation(Filename, Line, Column);
+ if (Line > 0) {
+ const FileEntry *FE = FileMgr.getFile(Filename);
+ if (!FE)
+ FE = FileMgr.getFile(D.getAbsolutePath());
+ if (FE) {
+ // If -gcolumn-info was not used, Column will be 0. This upsets the
+ // source manager, so pass 1 if Column is not set.
+ DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1);
+ }
}
BadDebugInfo = DILoc.isInvalid();
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 3c582688e91e..1713e40c312b 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -28,10 +28,10 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
@@ -430,10 +430,25 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
NormalCleanupDest = Address::invalid();
}
- // Add the required-vector-width attribute.
- if (LargestVectorWidth != 0)
- CurFn->addFnAttr("min-legal-vector-width",
- llvm::utostr(LargestVectorWidth));
+ // Scan function arguments for vector width.
+ for (llvm::Argument &A : CurFn->args())
+ if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
+ // Update vector width based on return type.
+ if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ VT->getPrimitiveSizeInBits());
+
+ // Add the required-vector-width attribute. This contains the max width from:
+ // 1. min-vector-width attribute used in the source program.
+ // 2. Any builtins used that have a vector width specified.
+ // 3. Values passed in and out of inline assembly.
+ // 4. Width of vector arguments and return types for this function.
+  // 5. Width of vector arguments and return types for functions called by this
+ // function.
+ CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
}
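A small illustration of the argument/return scan added above; the typedef and the expected attribute value are assumptions about a typical x86 build, not output taken from this patch.

// A 256-bit vector in the signature alone now forces the attribute, even if
// the body uses no wide builtins or inline assembly.
typedef float v8sf __attribute__((vector_size(32)));
v8sf vadd(v8sf a, v8sf b) { return a + b; }
// Expected IR function attribute: "min-legal-vector-width"="256"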
/// ShouldInstrumentFunction - Return true if the current function should be
@@ -772,9 +787,11 @@ static bool endsWithReturn(const Decl* F) {
return false;
}
-static void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
- Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
- Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
+void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
+ if (SanOpts.has(SanitizerKind::Thread)) {
+ Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
+ Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
+ }
}
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
@@ -866,7 +883,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
if (SanOpts.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
- if (SanOpts.has(SanitizerKind::Memory))
+ if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
@@ -887,10 +904,6 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
(OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
markAsIgnoreThreadCheckingAtRuntime(Fn);
}
- } else if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- IdentifierInfo *II = FD->getIdentifier();
- if (II && II->isStr("__destroy_helper_block_"))
- markAsIgnoreThreadCheckingAtRuntime(Fn);
}
}
@@ -903,21 +916,21 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
}
// Apply xray attributes to the function (as a string, for now)
- bool InstrumentXray = ShouldXRayInstrumentFunction() &&
- CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
- XRayInstrKind::Function);
- if (D && InstrumentXray) {
+ if (D) {
if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
- if (XRayAttr->alwaysXRayInstrument())
- Fn->addFnAttr("function-instrument", "xray-always");
- if (XRayAttr->neverXRayInstrument())
- Fn->addFnAttr("function-instrument", "xray-never");
- if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) {
- Fn->addFnAttr("xray-log-args",
- llvm::utostr(LogArgs->getArgumentCount()));
+ if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
+ XRayInstrKind::Function)) {
+ if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("function-instrument", "xray-always");
+ if (XRayAttr->neverXRayInstrument())
+ Fn->addFnAttr("function-instrument", "xray-never");
+ if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
+ if (ShouldXRayInstrumentFunction())
+ Fn->addFnAttr("xray-log-args",
+ llvm::utostr(LogArgs->getArgumentCount()));
}
} else {
- if (!CGM.imbueXRayAttrs(Fn, Loc))
+ if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
Fn->addFnAttr(
"xray-instruction-threshold",
llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
@@ -981,6 +994,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (FD->isMain())
Fn->addFnAttr(llvm::Attribute::NoRecurse);
+ // If a custom alignment is used, force realigning to this alignment on
+ // any main function which certainly will need it.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
+ CGM.getCodeGenOpts().StackAlignment)
+ Fn->addFnAttr("stackrealign");
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -1053,9 +1073,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
// Count the implicit return.
if (!endsWithReturn(D))
++NumReturnExprs;
- } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
- // Indirect aggregate return; emit returned value directly into sret slot.
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
+ // Indirect return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
@@ -1137,7 +1156,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (CXXABIThisValue) {
SanitizerSet SkippedChecks;
SkippedChecks.set(SanitizerKind::ObjectSize, true);
- QualType ThisTy = MD->getThisType(getContext());
+ QualType ThisTy = MD->getThisType();
// If this is the call operator of a lambda with no capture-default, it
// may have a static invoker function, which may call this operator with
@@ -1183,8 +1202,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
LargestVectorWidth = VecWidth->getVectorWidth();
}
-void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
- const Stmt *Body) {
+void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
incrementProfileCounter(Body);
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
@@ -1238,7 +1256,7 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
if (CGM.getCXXABI().HasThisReturn(GD))
- ResTy = MD->getThisType(getContext());
+ ResTy = MD->getThisType();
else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
ResTy = CGM.getContext().VoidPtrTy;
CGM.getCXXABI().buildThisParam(*this, Args);
@@ -1352,7 +1370,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// copy-constructors.
emitImplicitAssignmentOperatorBody(Args);
} else if (Body) {
- EmitFunctionBody(Args, Body);
+ EmitFunctionBody(Body);
} else
llvm_unreachable("no definition for emitted function");
@@ -1493,10 +1511,11 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool AllowLabels) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
- llvm::APSInt Int;
- if (!Cond->EvaluateAsInt(Int, getContext()))
+ Expr::EvalResult Result;
+ if (!Cond->EvaluateAsInt(Result, getContext()))
return false; // Not foldable, not integer or not fully evaluatable.
+ llvm::APSInt Int = Result.Val.getInt();
if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
return false; // Contains a label.
@@ -1681,7 +1700,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
// create metadata that specifies that the branch is unpredictable.
// Don't bother if not optimizing because that metadata would not be used.
llvm::MDNode *Unpredictable = nullptr;
- auto *Call = dyn_cast<CallExpr>(Cond);
+ auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
@@ -2089,9 +2108,8 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
SanitizerScope SanScope(this);
llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
llvm::Constant *StaticArgs[] = {
- EmitCheckSourceLocation(size->getLocStart()),
- EmitCheckTypeDescriptor(size->getType())
- };
+ EmitCheckSourceLocation(size->getBeginLoc()),
+ EmitCheckTypeDescriptor(size->getType())};
EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
SanitizerKind::VLABound),
SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
@@ -2189,6 +2207,49 @@ void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
protection.Inst->eraseFromParent();
}
+void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+ QualType Ty, SourceLocation Loc,
+ SourceLocation AssumptionLoc,
+ llvm::Value *Alignment,
+ llvm::Value *OffsetValue) {
+ llvm::Value *TheCheck;
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+ if (SanOpts.has(SanitizerKind::Alignment)) {
+ EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ OffsetValue, TheCheck, Assumption);
+ }
+}
+
+void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+ QualType Ty, SourceLocation Loc,
+ SourceLocation AssumptionLoc,
+ unsigned Alignment,
+ llvm::Value *OffsetValue) {
+ llvm::Value *TheCheck;
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
+ if (SanOpts.has(SanitizerKind::Alignment)) {
+ llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
+ EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
+ OffsetValue, TheCheck, Assumption);
+ }
+}
+
+void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
+ const Expr *E,
+ SourceLocation AssumptionLoc,
+ unsigned Alignment,
+ llvm::Value *OffsetValue) {
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ E = CE->getSubExprAsWritten();
+ QualType Ty = E->getType();
+ SourceLocation Loc = E->getExprLoc();
+
+ EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
+ OffsetValue);
+}
+
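A source-level sketch of what feeds the new EmitAlignmentAssumption overloads: with -fsanitize=alignment the pointer is checked via EmitAlignmentAssumptionCheck before the llvm.assume is relied on. The function below is illustrative only.

// __builtin_assume_aligned lowers to an alignment assumption; under
// -fsanitize=alignment a misaligned 'p' is now reported at run time.
void *assume64(void *p) {
  return __builtin_assume_aligned(p, 64);
}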
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
@@ -2225,7 +2286,7 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
// annotation on the first field of a struct and annotation on the struct
// itself.
if (VTy != CGM.Int8PtrTy)
- V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
+ V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
V = Builder.CreateBitCast(V, VTy);
}
@@ -2272,7 +2333,7 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
// Now build up the set of caller features and verify that all the required
// features are there.
llvm::StringMap<bool> CallerFeatureMap;
- CGM.getFunctionFeatureMap(CallerFeatureMap, FD);
+ CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));
// If we have at least one of the features in the feature list return
// true, otherwise return false.
@@ -2280,14 +2341,13 @@ static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
SmallVector<StringRef, 1> OrFeatures;
Feature.split(OrFeatures, '|');
- return std::any_of(OrFeatures.begin(), OrFeatures.end(),
- [&](StringRef Feature) {
- if (!CallerFeatureMap.lookup(Feature)) {
- FirstMissing = Feature.str();
- return false;
- }
- return true;
- });
+ return llvm::any_of(OrFeatures, [&](StringRef Feature) {
+ if (!CallerFeatureMap.lookup(Feature)) {
+ FirstMissing = Feature.str();
+ return false;
+ }
+ return true;
+ });
});
}
@@ -2319,7 +2379,7 @@ void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
return;
StringRef(FeatureList).split(ReqFeatures, ',');
if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
- CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
+ CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
<< TargetDecl->getDeclName()
<< CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
@@ -2345,7 +2405,7 @@ void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
ReqFeatures.push_back(F.getKey());
}
if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
- CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
+ CGM.getDiags().Report(E->getBeginLoc(), diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
}
}
@@ -2359,91 +2419,81 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
CGM.getSanStats().create(IRB, SSK);
}
-llvm::Value *CodeGenFunction::FormResolverCondition(
- const TargetMultiVersionResolverOption &RO) {
- llvm::Value *TrueCondition = nullptr;
- if (!RO.ParsedAttribute.Architecture.empty())
- TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);
+llvm::Value *
+CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
+ llvm::Value *Condition = nullptr;
+
+ if (!RO.Conditions.Architecture.empty())
+ Condition = EmitX86CpuIs(RO.Conditions.Architecture);
- if (!RO.ParsedAttribute.Features.empty()) {
- SmallVector<StringRef, 8> FeatureList;
- llvm::for_each(RO.ParsedAttribute.Features,
- [&FeatureList](const std::string &Feature) {
- FeatureList.push_back(StringRef{Feature}.substr(1));
- });
- llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
- TrueCondition = TrueCondition ? Builder.CreateAnd(TrueCondition, FeatureCmp)
- : FeatureCmp;
+ if (!RO.Conditions.Features.empty()) {
+ llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
+ Condition =
+ Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
}
- return TrueCondition;
+ return Condition;
}
-void CodeGenFunction::EmitTargetMultiVersionResolver(
- llvm::Function *Resolver,
- ArrayRef<TargetMultiVersionResolverOption> Options) {
- assert((getContext().getTargetInfo().getTriple().getArch() ==
- llvm::Triple::x86 ||
- getContext().getTargetInfo().getTriple().getArch() ==
- llvm::Triple::x86_64) &&
- "Only implemented for x86 targets");
-
- // Main function's basic block.
- llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
- Builder.SetInsertPoint(CurBlock);
- EmitX86CpuInit();
+static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
+ llvm::Function *Resolver,
+ CGBuilderTy &Builder,
+ llvm::Function *FuncToReturn,
+ bool SupportsIFunc) {
+ if (SupportsIFunc) {
+ Builder.CreateRet(FuncToReturn);
+ return;
+ }
- llvm::Function *DefaultFunc = nullptr;
- for (const TargetMultiVersionResolverOption &RO : Options) {
- Builder.SetInsertPoint(CurBlock);
- llvm::Value *TrueCondition = FormResolverCondition(RO);
+ llvm::SmallVector<llvm::Value *, 10> Args;
+ llvm::for_each(Resolver->args(),
+ [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
- if (!TrueCondition) {
- DefaultFunc = RO.Function;
- } else {
- llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
- llvm::IRBuilder<> RetBuilder(RetBlock);
- RetBuilder.CreateRet(RO.Function);
- CurBlock = createBasicBlock("ro_else", Resolver);
- Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
- }
- }
+ llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
+ Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
- assert(DefaultFunc && "No default version?");
- // Emit return from the 'else-ist' block.
- Builder.SetInsertPoint(CurBlock);
- Builder.CreateRet(DefaultFunc);
+ if (Resolver->getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Result);
}
-void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
- llvm::Function *Resolver,
- ArrayRef<CPUDispatchMultiVersionResolverOption> Options) {
+void CodeGenFunction::EmitMultiVersionResolver(
+ llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
assert((getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86 ||
getContext().getTargetInfo().getTriple().getArch() ==
llvm::Triple::x86_64) &&
"Only implemented for x86 targets");
+ bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
+
// Main function's basic block.
llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
Builder.SetInsertPoint(CurBlock);
EmitX86CpuInit();
- for (const CPUDispatchMultiVersionResolverOption &RO : Options) {
+ for (const MultiVersionResolverOption &RO : Options) {
Builder.SetInsertPoint(CurBlock);
-
- // "generic" case should catch-all.
- if (RO.FeatureMask == 0) {
- Builder.CreateRet(RO.Function);
+ llvm::Value *Condition = FormResolverCondition(RO);
+
+ // The 'default' or 'generic' case.
+ if (!Condition) {
+ assert(&RO == Options.end() - 1 &&
+ "Default or Generic case must be last");
+ CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
+ SupportsIFunc);
return;
}
+
llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
- llvm::IRBuilder<> RetBuilder(RetBlock);
- RetBuilder.CreateRet(RO.Function);
+ CGBuilderTy RetBuilder(*this, RetBlock);
+ CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
+ SupportsIFunc);
CurBlock = createBasicBlock("resolver_else", Resolver);
- llvm::Value *TrueCondition = EmitX86CpuSupports(RO.FeatureMask);
- Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
+ Builder.CreateCondBr(Condition, RetBlock, CurBlock);
}
+ // If no generic/default, emit an unreachable.
Builder.SetInsertPoint(CurBlock);
llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
TrapCall->setDoesNotReturn();
@@ -2452,6 +2502,61 @@ void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
Builder.ClearInsertionPoint();
}
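
As a minimal sketch of what the resolver emitted above evaluates at load time (illustrative only, not part of the patch; __builtin_cpu_init/__builtin_cpu_supports stand in for the EmitX86CpuInit/EmitX86CpuSupports calls, and the function names are invented):

// Sketch: an x86 multiversion resolver, conceptually. The most specific
// option is tested first; the 'default' option must come last.
#include <cstdio>

static int impl_avx2() { return 2; }
static int impl_default() { return 0; }

static auto resolve() -> int (*)() {
  __builtin_cpu_init();                // cf. EmitX86CpuInit()
  if (__builtin_cpu_supports("avx2"))  // cf. FormResolverCondition()
    return impl_avx2;                  // "resolver_return" block
  return impl_default;                 // trailing default; otherwise trap
}

int main() { std::printf("%d\n", resolve()()); }
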
+// Loc - where the diagnostic will point, i.e. where in the source code this
+// alignment has failed.
+// SecondaryLoc - if present (it will be when sufficiently different from
+// Loc), the diagnostic will additionally emit a "Note:" pointing to this
+// location. It should be the location where the alignment assumption was
+// written, e.g. the __attribute__((assume_aligned)) itself.
+void CodeGenFunction::EmitAlignmentAssumptionCheck(
+ llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
+ SourceLocation SecondaryLoc, llvm::Value *Alignment,
+ llvm::Value *OffsetValue, llvm::Value *TheCheck,
+ llvm::Instruction *Assumption) {
+ assert(Assumption && isa<llvm::CallInst>(Assumption) &&
+ cast<llvm::CallInst>(Assumption)->getCalledValue() ==
+ llvm::Intrinsic::getDeclaration(
+ Builder.GetInsertBlock()->getParent()->getParent(),
+ llvm::Intrinsic::assume) &&
+ "Assumption should be a call to llvm.assume().");
+ assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
+ "Assumption should be the last instruction of the basic block, "
+ "since the basic block is still being generated.");
+
+ if (!SanOpts.has(SanitizerKind::Alignment))
+ return;
+
+ // Don't check pointers to volatile data. The behavior here is implementation-
+ // defined.
+ if (Ty->getPointeeType().isVolatileQualified())
+ return;
+
+ // We need to temporarily remove the assumption so we can insert the
+ // sanitizer check before it; otherwise the check will be dropped by optimizations.
+ Assumption->removeFromParent();
+
+ {
+ SanitizerScope SanScope(this);
+
+ if (!OffsetValue)
+ OffsetValue = Builder.getInt1(0); // no offset.
+
+ llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
+ EmitCheckSourceLocation(SecondaryLoc),
+ EmitCheckTypeDescriptor(Ty)};
+ llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
+ EmitCheckValue(Alignment),
+ EmitCheckValue(OffsetValue)};
+ EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
+ SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
+ }
+
+ // We are now in the (new, empty) "cont" basic block.
+ // Reintroduce the assumption.
+ Builder.Insert(Assumption);
+ // FIXME: Assumption still has its original basic block as its parent.
+}
+
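
A hedged example of source that reaches the new check (names invented, not from the patch): under -fsanitize=alignment, the alignment promised by __builtin_assume_aligned (or an assume_aligned attribute) is now verified at runtime before the llvm.assume is re-inserted.

int load_first(const int *p) {
  // Promise 32-byte alignment; if p is not actually 32-byte aligned, the
  // alignment_assumption handler added above reports it here.
  const int *q = static_cast<const int *>(__builtin_assume_aligned(p, 32));
  return q[0];
}

int main() {
  alignas(64) int buf[16] = {1, 2, 3, 4};
  return load_first(buf + 1); // buf + 1 is only sizeof(int)-aligned
}
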
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
if (CGDebugInfo *DI = getDebugInfo())
return DI->SourceLocToDebugLoc(Location);
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f9e284232972..89cb850ab1b1 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -29,9 +29,9 @@
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
@@ -131,6 +131,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
+ SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
enum SanitizerHandler {
@@ -470,7 +471,7 @@ public:
/// potentially set the return value.
bool SawAsmBlock = false;
- const FunctionDecl *CurSEHParent = nullptr;
+ const NamedDecl *CurSEHParent = nullptr;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
@@ -1197,6 +1198,8 @@ public:
private:
CGDebugInfo *DebugInfo;
+ /// Used to create unique names for artificial VLA size debug info variables.
+ unsigned VLAExprCounter = 0;
bool DisableDebugInfo = false;
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
@@ -1746,6 +1749,9 @@ public:
bool IsLambdaConversionToBlock,
bool BuildGlobalBlock);
+ /// Check if \p T is a C++ class that has a destructor that can throw.
+ static bool cxxDestructorCanThrow(QualType T);
+
llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
@@ -1754,7 +1760,8 @@ public:
const ObjCPropertyImplDecl *PID);
llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
- void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
+ bool CanThrow);
class AutoVarEmission;
@@ -1777,13 +1784,13 @@ public:
/// \param LoadBlockVarAddr Indicates whether we need to emit a load from
/// \p Addr to get the address of the __block structure.
void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
- bool LoadBlockVarAddr);
+ bool LoadBlockVarAddr, bool CanThrow);
void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
llvm::Value *ptr);
Address LoadBlockStruct();
- Address GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ Address GetAddrOfBlockDecl(const VarDecl *var);
/// BuildBlockByrefAddress - Computes the location of the
/// data in a variable which is declared as __block.
@@ -1800,6 +1807,11 @@ public:
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
+
+ /// Annotate the function with an attribute that disables TSan checking at
+ /// runtime.
+ void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);
+
/// Emit code for the start of a function.
/// \param Loc The location to be associated with the function.
/// \param StartLoc The location of the function body.
@@ -1816,7 +1828,7 @@ public:
void EmitConstructorBody(FunctionArgList &Args);
void EmitDestructorBody(FunctionArgList &Args);
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
- void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
+ void EmitFunctionBody(const Stmt *Body);
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
@@ -1845,7 +1857,7 @@ public:
void FinishThunk();
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
- void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
+ void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
llvm::Value *Callee);
/// Generate a thunk for the given method.
@@ -2622,12 +2634,6 @@ public:
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
- llvm::Value *OffsetValue = nullptr) {
- Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
- OffsetValue);
- }
-
/// Converts Location to a DebugLoc, if debug information is enabled.
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
@@ -2674,8 +2680,9 @@ public:
llvm::Value *NRVOFlag;
- /// True if the variable is a __block variable.
- bool IsByRef;
+ /// True if the variable is a __block variable that is captured by an
+ /// escaping block.
+ bool IsEscapingByRef;
/// True if the variable is of aggregate type and has a constant
/// initializer.
@@ -2695,7 +2702,7 @@ public:
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
- IsByRef(false), IsConstantAggregate(false),
+ IsEscapingByRef(false), IsConstantAggregate(false),
SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
@@ -2725,7 +2732,7 @@ public:
/// Note that this does not chase the forwarding pointer for
/// __block decls.
Address getObjectAddress(CodeGenFunction &CGF) const {
- if (!IsByRef) return Addr;
+ if (!IsEscapingByRef) return Addr;
return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
}
@@ -2790,11 +2797,27 @@ public:
PeepholeProtection protectFromPeepholes(RValue rvalue);
void unprotectFromPeepholes(PeepholeProtection protection);
- void EmitAlignmentAssumption(llvm::Value *PtrValue, llvm::Value *Alignment,
- llvm::Value *OffsetValue = nullptr) {
- Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
- OffsetValue);
- }
+ void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
+ SourceLocation Loc,
+ SourceLocation AssumptionLoc,
+ llvm::Value *Alignment,
+ llvm::Value *OffsetValue,
+ llvm::Value *TheCheck,
+ llvm::Instruction *Assumption);
+
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
+ SourceLocation Loc, SourceLocation AssumptionLoc,
+ llvm::Value *Alignment,
+ llvm::Value *OffsetValue = nullptr);
+
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
+ SourceLocation Loc, SourceLocation AssumptionLoc,
+ unsigned Alignment,
+ llvm::Value *OffsetValue = nullptr);
+
+ void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
+ SourceLocation AssumptionLoc, unsigned Alignment,
+ llvm::Value *OffsetValue = nullptr);
//===--------------------------------------------------------------------===//
// Statement Emission
@@ -2878,6 +2901,8 @@ public:
void EnterSEHTryStmt(const SEHTryStmt &S);
void ExitSEHTryStmt(const SEHTryStmt &S);
+ void pushSEHCleanup(CleanupKind kind,
+ llvm::Function *FinallyFunc);
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
const Stmt *OutlinedStmt);
@@ -3512,6 +3537,7 @@ public:
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
+ llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
AggValueSlot slot = AggValueSlot::ignored());
@@ -3603,6 +3629,19 @@ public:
CXXDtorType Type,
const CXXRecordDecl *RD);
+ // Return the copy constructor name with the prefix "__copy_constructor_"
+ // removed.
+ static std::string getNonTrivialCopyConstructorStr(QualType QT,
+ CharUnits Alignment,
+ bool IsVolatile,
+ ASTContext &Ctx);
+
+ // Return the destructor name with the prefix "__destructor_" removed.
+ static std::string getNonTrivialDestructorStr(QualType QT,
+ CharUnits Alignment,
+ bool IsVolatile,
+ ASTContext &Ctx);
+
// These functions emit calls to the special functions of non-trivial C
// structs.
void defaultInitNonTrivialCStructVar(LValue Dst);
@@ -3653,9 +3692,10 @@ public:
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue);
- RValue EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E,
- ReturnValueSlot ReturnValue);
+ RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+ const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ RValue emitRotate(const CallExpr *E, bool IsRotateRight);
/// Emit IR for __builtin_os_log_format.
RValue emitBuiltinOSLogFormat(const CallExpr &E);
@@ -3769,6 +3809,11 @@ public:
llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);
+ llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
+ llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
+ llvm::Type *returnType);
+ void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
+
std::pair<LValue,llvm::Value*>
EmitARCStoreAutoreleasing(const BinaryOperator *e);
std::pair<LValue,llvm::Value*>
@@ -3776,6 +3821,10 @@ public:
std::pair<LValue,llvm::Value*>
EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);
+ llvm::Value *EmitObjCAlloc(llvm::Value *value,
+ llvm::Type *returnType);
+ llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
+ llvm::Type *returnType);
llvm::Value *EmitObjCThrowOperand(const Expr *expr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
@@ -3865,6 +3914,8 @@ public:
AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV);
+ // Emit an @llvm.invariant.start call for the given memory region.
+ void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
@@ -3900,9 +3951,10 @@ public:
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
- void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- ArrayRef<llvm::Function *> CXXThreadLocals,
- Address Guard = Address::invalid());
+ void
+ GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ ArrayRef<llvm::Function *> CXXThreadLocals,
+ ConstantAddress Guard = ConstantAddress::invalid());
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
@@ -3920,11 +3972,13 @@ public:
void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
- void enterFullExpression(const ExprWithCleanups *E) {
- if (E->getNumObjects() == 0) return;
+ void enterFullExpression(const FullExpr *E) {
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
+ if (EWC->getNumObjects() == 0)
+ return;
enterNonTrivialFullExpression(E);
}
- void enterNonTrivialFullExpression(const ExprWithCleanups *E);
+ void enterNonTrivialFullExpression(const FullExpr *E);
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
@@ -4245,47 +4299,29 @@ public:
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
- struct TargetMultiVersionResolverOption {
+ struct MultiVersionResolverOption {
llvm::Function *Function;
- TargetAttr::ParsedTargetAttr ParsedAttribute;
- unsigned Priority;
- TargetMultiVersionResolverOption(
- const TargetInfo &TargInfo, llvm::Function *F,
- const clang::TargetAttr::ParsedTargetAttr &PT)
- : Function(F), ParsedAttribute(PT), Priority(0u) {
- for (StringRef Feat : PT.Features)
- Priority = std::max(Priority,
- TargInfo.multiVersionSortPriority(Feat.substr(1)));
-
- if (!PT.Architecture.empty())
- Priority = std::max(Priority,
- TargInfo.multiVersionSortPriority(PT.Architecture));
- }
-
- bool operator>(const TargetMultiVersionResolverOption &Other) const {
- return Priority > Other.Priority;
- }
+ FunctionDecl *FD;
+ struct Conds {
+ StringRef Architecture;
+ llvm::SmallVector<StringRef, 8> Features;
+
+ Conds(StringRef Arch, ArrayRef<StringRef> Feats)
+ : Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
+ } Conditions;
+
+ MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
+ ArrayRef<StringRef> Feats)
+ : Function(F), Conditions(Arch, Feats) {}
};
- void EmitTargetMultiVersionResolver(
- llvm::Function *Resolver,
- ArrayRef<TargetMultiVersionResolverOption> Options);
- struct CPUDispatchMultiVersionResolverOption {
- llvm::Function *Function;
- // Note: EmitX86CPUSupports only has 32 bits available, so we store the mask
- // as 32 bits here. When 64-bit support is added to __builtin_cpu_supports,
- // this can be extended to 64 bits.
- uint32_t FeatureMask;
- CPUDispatchMultiVersionResolverOption(llvm::Function *F, uint64_t Mask)
- : Function(F), FeatureMask(static_cast<uint32_t>(Mask)) {}
- bool operator>(const CPUDispatchMultiVersionResolverOption &Other) const {
- return FeatureMask > Other.FeatureMask;
- }
- };
- void EmitCPUDispatchMultiVersionResolver(
- llvm::Function *Resolver,
- ArrayRef<CPUDispatchMultiVersionResolverOption> Options);
- static uint32_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+ // Emits the body of a multiversion function's resolver. Assumes that the
+ // options are already sorted in the proper order, with the 'default' option
+ // last (if it exists).
+ void EmitMultiVersionResolver(llvm::Function *Resolver,
+ ArrayRef<MultiVersionResolverOption> Options);
+
+ static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
private:
QualType getVarArgType(const Expr *Arg);
@@ -4302,10 +4338,9 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
- llvm::Value *EmitX86CpuSupports(uint32_t Mask);
+ llvm::Value *EmitX86CpuSupports(uint64_t Mask);
llvm::Value *EmitX86CpuInit();
- llvm::Value *
- FormResolverCondition(const TargetMultiVersionResolverOption &RO);
+ llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
};
inline DominatingLLVMValue::saved_type
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 8c5e0df0969b..244738042cef 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -36,14 +36,15 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
-#include "clang/Frontend/CodeGenOptions.h"
-#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
@@ -53,6 +54,7 @@
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
@@ -124,7 +126,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
createObjCRuntime();
if (LangOpts.OpenCL)
createOpenCLRuntime();
@@ -147,12 +149,12 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Block.GlobalUniqueCount = 0;
- if (C.getLangOpts().ObjC1)
+ if (C.getLangOpts().ObjC)
ObjCData.reset(new ObjCEntrypoints());
if (CodeGenOpts.hasProfileClangUse()) {
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
- CodeGenOpts.ProfileInstrumentUsePath);
+ CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
if (auto E = ReaderOrErr.takeError()) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Could not read profile %0: %1");
@@ -320,8 +322,6 @@ void CodeGenModule::checkAliases() {
assert(FTy);
if (!FTy->getReturnType()->isPointerTy())
Diags.Report(Location, diag::err_ifunc_resolver_return);
- if (FTy->getNumParams())
- Diags.Report(Location, diag::err_ifunc_resolver_params);
}
llvm::Constant *Aliasee = Alias->getIndirectSymbol();
@@ -458,9 +458,12 @@ void CodeGenModule::Release() {
// Indicate that we want CodeView in the metadata.
getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
}
+ if (CodeGenOpts.CodeViewGHash) {
+ getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
+ }
if (CodeGenOpts.ControlFlowGuard) {
// We want function ID tables for Control Flow Guard.
- getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
+ getModule().addModuleFlag(llvm::Module::Warning, "cfguardtable", 1);
}
if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
// We don't support LTO with 2 with different StrictVTablePointers
@@ -556,6 +559,20 @@ void CodeGenModule::Release() {
getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
}
+ if (getCodeGenOpts().CodeModel.size() > 0) {
+ unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
+ .Case("tiny", llvm::CodeModel::Tiny)
+ .Case("small", llvm::CodeModel::Small)
+ .Case("kernel", llvm::CodeModel::Kernel)
+ .Case("medium", llvm::CodeModel::Medium)
+ .Case("large", llvm::CodeModel::Large)
+ .Default(~0u);
+ if (CM != ~0u) {
+ llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
+ getModule().setCodeModel(codeModel);
+ }
+ }
+
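
The mapping above is the usual llvm::StringSwitch idiom; a minimal standalone sketch of the same pattern (assuming LLVM headers are on the include path; the enum here is illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <cstdio>

enum class Model { Tiny, Small, Kernel, Medium, Large, Invalid };

static Model parseCodeModel(llvm::StringRef S) {
  return llvm::StringSwitch<Model>(S)
      .Case("tiny", Model::Tiny)
      .Case("small", Model::Small)
      .Case("kernel", Model::Kernel)
      .Case("medium", Model::Medium)
      .Case("large", Model::Large)
      .Default(Model::Invalid); // mirrors the ~0u sentinel above
}

int main() { std::printf("%d\n", static_cast<int>(parseCodeModel("large"))); }
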
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
@@ -573,6 +590,9 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitVersionIdentMetadata)
EmitVersionIdentMetadata();
+ if (!getCodeGenOpts().RecordCommandLine.empty())
+ EmitCommandLineMetadata();
+
EmitTargetMetadata();
}
@@ -683,8 +703,8 @@ void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
- getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
- << Msg << S->getSourceRange();
+ getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
+ << Msg << S->getSourceRange();
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
@@ -730,6 +750,14 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
return false;
const llvm::Triple &TT = CGM.getTriple();
+ if (TT.isWindowsGNUEnvironment()) {
+ // In MinGW, variables without DLLImport can still be automatically
+ // imported from a DLL by the linker; don't mark variables that
+ // potentially could come from another DLL as DSO local.
+ if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
+ !GV->isThreadLocal())
+ return false;
+ }
// Every other GV is local on COFF.
// Make an exception for windows OS in the triple: Some firmware builds use
// *-win32-macho triples. This (accidentally?) produced windows relocations
@@ -869,11 +897,13 @@ static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
const CPUSpecificAttr *Attr,
+ unsigned CPUIndex,
raw_ostream &Out) {
- // cpu_specific gets the current name, dispatch gets the resolver.
+ // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
+ // supported.
if (Attr)
- Out << getCPUSpecificMangling(CGM, Attr->getCurCPUName()->getName());
- else
+ Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
+ else if (CGM.getTarget().supportsIFunc())
Out << ".resolver";
}
@@ -939,11 +969,19 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
- if (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())
- AppendCPUSpecificCPUDispatchMangling(
- CGM, FD->getAttr<CPUSpecificAttr>(), Out);
- else
+ switch (FD->getMultiVersionKind()) {
+ case MultiVersionKind::CPUDispatch:
+ case MultiVersionKind::CPUSpecific:
+ AppendCPUSpecificCPUDispatchMangling(CGM,
+ FD->getAttr<CPUSpecificAttr>(),
+ GD.getMultiVersionIndex(), Out);
+ break;
+ case MultiVersionKind::Target:
AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
+ break;
+ case MultiVersionKind::None:
+ llvm_unreachable("None multiversion type isn't valid here");
+ }
}
return Out.str();
@@ -968,8 +1006,10 @@ void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
"Other GD should now be a multiversioned function");
// OtherFD is the version of this function that was mangled BEFORE
// becoming a MultiVersion function. It potentially needs to be updated.
- const FunctionDecl *OtherFD =
- OtherGD.getCanonicalDecl().getDecl()->getAsFunction();
+ const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
+ .getDecl()
+ ->getAsFunction()
+ ->getMostRecentDecl();
std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
// This is so that if the initial version was already the 'default'
// version, we don't try to update it.
@@ -1001,26 +1041,6 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
}
}
- const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl());
- // Since CPUSpecific can require multiple emits per decl, store the manglings
- // separately.
- if (FD &&
- (FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion())) {
- const auto *SD = FD->getAttr<CPUSpecificAttr>();
-
- std::pair<GlobalDecl, unsigned> SpecCanonicalGD{
- CanonicalGD,
- SD ? SD->ActiveArgIndex : std::numeric_limits<unsigned>::max()};
-
- auto FoundName = CPUSpecificMangledDeclNames.find(SpecCanonicalGD);
- if (FoundName != CPUSpecificMangledDeclNames.end())
- return FoundName->second;
-
- auto Result = CPUSpecificManglings.insert(
- std::make_pair(getMangledNameImpl(*this, GD, FD), SpecCanonicalGD));
- return CPUSpecificMangledDeclNames[SpecCanonicalGD] = Result.first->first();
- }
-
auto FoundName = MangledDeclNames.find(CanonicalGD);
if (FoundName != MangledDeclNames.end())
return FoundName->second;
@@ -1082,11 +1102,12 @@ void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
- llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+ llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
+ TheModule.getDataLayout().getProgramAddressSpace());
// Get the type of a ctor entry, { i32, void ()*, i8* }.
llvm::StructType *CtorStructTy = llvm::StructType::get(
- Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy);
+ Int32Ty, CtorPFTy, VoidPtrTy);
// Construct the constructor and destructor arrays.
ConstantInitBuilder builder(*this);
@@ -1142,12 +1163,12 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
-void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
const CGFunctionInfo &Info,
llvm::Function *F) {
unsigned CallingConv;
llvm::AttributeList PAL;
- ConstructAttributeList(F->getName(), Info, D, PAL, CallingConv, false);
+ ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -1277,9 +1298,19 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// Otherwise, propagate the inline hint attribute and potentially use its
// absence to mark things as noinline.
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (any_of(FD->redecls(), [&](const FunctionDecl *Redecl) {
- return Redecl->isInlineSpecified();
- })) {
+ // Search function and template pattern redeclarations for inline.
+ auto CheckForInline = [](const FunctionDecl *FD) {
+ auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
+ return Redecl->isInlineSpecified();
+ };
+ if (any_of(FD->redecls(), CheckRedeclForInline))
+ return true;
+ const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
+ if (!Pattern)
+ return false;
+ return any_of(Pattern->redecls(), CheckRedeclForInline);
+ };
+ if (CheckForInline(FD)) {
B.addAttribute(llvm::Attribute::InlineHint);
} else if (CodeGenOpts.getInlining() ==
CodeGenOptions::OnlyHintInlining &&
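
A small example of why the template pattern's redeclarations are searched above (illustrative, not from the patch): the instantiation of f<int> carries no 'inline' on its own redecl chain, so the hint has to come from the pattern.

template <class T> T f(T x);                          // first declaration
template <class T> inline T f(T x) { return x + 1; }  // redeclared inline

int use() { return f(41); }  // f<int> should still get the InlineHint
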
@@ -1350,23 +1381,30 @@ void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
if (D && D->hasAttr<UsedAttr>())
addUsedGlobal(GV);
+
+ if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
+ const auto *VD = cast<VarDecl>(D);
+ if (VD->getType().isConstQualified() &&
+ VD->getStorageDuration() == SD_Static)
+ addUsedGlobal(GV);
+ }
}
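
A hedged example of what the new KeepStaticConsts path retains (name invented): with -fkeep-static-consts, an otherwise unreferenced constant like this is added to llvm.used instead of being dropped.

static const char BuildTag[] = "example-build-1234"; // intentionally unused

int main() { return 0; }
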
-bool CodeGenModule::GetCPUAndFeaturesAttributes(const Decl *D,
+bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &Attrs) {
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
std::vector<std::string> Features;
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
FD = FD ? FD->getMostRecentDecl() : FD;
const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
bool AddedAttr = false;
if (TD || SD) {
llvm::StringMap<bool> FeatureMap;
- getFunctionFeatureMap(FeatureMap, FD);
+ getFunctionFeatureMap(FeatureMap, GD);
// Produce the canonical string for this set of features.
for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
@@ -1393,7 +1431,7 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(const Decl *D,
AddedAttr = true;
}
if (!Features.empty()) {
- llvm::sort(Features.begin(), Features.end());
+ llvm::sort(Features);
Attrs.addAttribute("target-features", llvm::join(Features, ","));
AddedAttr = true;
}
@@ -1422,7 +1460,7 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
F->addFnAttr("implicit-section-name", SA->getName());
llvm::AttrBuilder Attrs;
- if (GetCPUAndFeaturesAttributes(D, Attrs)) {
+ if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
@@ -1445,7 +1483,7 @@ void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
const CGFunctionInfo &FI) {
const Decl *D = GD.getDecl();
- SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributes(GD, FI, F);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
@@ -1507,7 +1545,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction) {
- SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
+ SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
// Setup target-specific attributes.
if (F->isDeclaration())
getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
@@ -1654,6 +1692,8 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
// Add linker options to link against the libraries/frameworks
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
+ bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
+ bool IsPS4 = CGM.getTarget().getTriple().isPS4();
// For modules that use export_as for linking, use that module
// name instead.
@@ -1673,11 +1713,19 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
}
// Link against a library.
- llvm::SmallString<24> Opt;
- CGM.getTargetCodeGenInfo().getDependentLibraryOption(
- Mod->LinkLibraries[I-1].Library, Opt);
- auto *OptString = llvm::MDString::get(Context, Opt);
- Metadata.push_back(llvm::MDNode::get(Context, OptString));
+ if (IsELF && !IsPS4) {
+ llvm::Metadata *Args[2] = {
+ llvm::MDString::get(Context, "lib"),
+ llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
+ };
+ Metadata.push_back(llvm::MDNode::get(Context, Args));
+ } else {
+ llvm::SmallString<24> Opt;
+ CGM.getTargetCodeGenInfo().getDependentLibraryOption(
+ Mod->LinkLibraries[I - 1].Library, Opt);
+ auto *OptString = llvm::MDString::get(Context, Opt);
+ Metadata.push_back(llvm::MDNode::get(Context, OptString));
+ }
}
}
@@ -1708,16 +1756,14 @@ void CodeGenModule::EmitModuleLinkOptions() {
bool AnyChildren = false;
// Visit the submodules of this module.
- for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub) {
+ for (const auto &SM : Mod->submodules()) {
// Skip explicit children; they need to be explicitly imported to be
// linked against.
- if ((*Sub)->IsExplicit)
+ if (SM->IsExplicit)
continue;
- if (Visited.insert(*Sub).second) {
- Stack.push_back(*Sub);
+ if (Visited.insert(SM).second) {
+ Stack.push_back(SM);
AnyChildren = true;
}
}
@@ -1747,6 +1793,10 @@ void CodeGenModule::EmitModuleLinkOptions() {
}
void CodeGenModule::EmitDeferred() {
+ // Emit deferred declare target declarations.
+ if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
+ getOpenMPRuntime().emitDeferredTargetDecls();
+
// Emit code for any potentially referenced deferred decls. Since a
// previously unused static decl may become used during the generation of code
// for a static function, iterate until no changes are made.
@@ -1949,9 +1999,6 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
StringRef Category) const {
- if (!LangOpts.XRayInstrument)
- return false;
-
const auto &XRayFilter = getContext().getXRayFilter();
using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
auto Attr = ImbueAttr::NONE;
@@ -1981,6 +2028,13 @@ bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
if (LangOpts.EmitAllDecls)
return true;
+ if (CodeGenOpts.KeepStaticConsts) {
+ const auto *VD = dyn_cast<VarDecl>(Global);
+ if (VD && VD->getType().isConstQualified() &&
+ VD->getStorageDuration() == SD_Static)
+ return true;
+ }
+
return getContext().DeclMustBeEmitted(Global);
}
@@ -2000,7 +2054,8 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// codegen for global variables, because they may be marked as threadprivate.
if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
- !isTypeConstant(Global->getType(), false))
+ !isTypeConstant(Global->getType(), false) &&
+ !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
return false;
return true;
@@ -2141,16 +2196,22 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
} else {
const auto *VD = cast<VarDecl>(Global);
assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
- // We need to emit device-side global CUDA variables even if a
- // variable does not have a definition -- we still need to define
- // host-side shadow for it.
- bool MustEmitForCuda = LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
- !VD->hasDefinition() &&
- (VD->hasAttr<CUDAConstantAttr>() ||
- VD->hasAttr<CUDADeviceAttr>());
- if (!MustEmitForCuda &&
- VD->isThisDeclarationADefinition() != VarDecl::Definition &&
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
!Context.isMSStaticDataMemberInlineDefinition(VD)) {
+ if (LangOpts.OpenMP) {
+ // Emit the declaration of a must-be-emitted declare target variable.
+ if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+ if (*Res == OMPDeclareTargetDeclAttr::MT_To) {
+ (void)GetAddrOfGlobalVar(VD);
+ } else {
+ assert(*Res == OMPDeclareTargetDeclAttr::MT_Link &&
+ "link claue expected.");
+ (void)getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
+ }
+ return;
+ }
+ }
// If this declaration may have caused an inline variable definition to
// change linkage, make sure that it's emitted.
if (Context.getInlineVariableDefinitionKind(VD) ==
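
For context, a minimal declare-target declaration of the kind the new OpenMP branch handles (illustrative; compiled with -fopenmp, with the definition assumed to live in another translation unit). A plain declaration defaults to the 'to' map-type; the 'link' case takes the getAddrOfDeclareTargetLink path instead.

#pragma omp declare target
extern int DeviceCounter;   // declaration only; 'to' map-type by default
#pragma omp end declare target

int readCounter() { return DeviceCounter; }
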
@@ -2360,6 +2421,19 @@ bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
return CodeGenOpts.OptimizationLevel > 0;
}
+void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
+ llvm::GlobalValue *GV) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (FD->isCPUSpecificMultiVersion()) {
+ auto *Spec = FD->getAttr<CPUSpecificAttr>();
+ for (unsigned I = 0; I < Spec->cpus_size(); ++I)
+ EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
+ // Requires multiple emits.
+ } else
+ EmitGlobalFunctionDefinition(GD, GV);
+}
+
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
@@ -2367,7 +2441,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
Context.getSourceManager(),
"Generating code for declaration");
- if (isa<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// At -O0, don't generate IR for functions with available_externally
// linkage.
if (!shouldEmitFunction(GD))
@@ -2380,6 +2454,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
+ else if (FD->isMultiVersion())
+ EmitMultiVersionFunctionDefinition(GD, GV);
else
EmitGlobalFunctionDefinition(GD, GV);
@@ -2389,6 +2465,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
return;
}
+ if (FD->isMultiVersion())
+ return EmitMultiVersionFunctionDefinition(GD, GV);
return EmitGlobalFunctionDefinition(GD, GV);
}
@@ -2401,9 +2479,22 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *NewFn);
+static unsigned
+TargetMVPriority(const TargetInfo &TI,
+ const CodeGenFunction::MultiVersionResolverOption &RO) {
+ unsigned Priority = 0;
+ for (StringRef Feat : RO.Conditions.Features)
+ Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
+
+ if (!RO.Conditions.Architecture.empty())
+ Priority = std::max(
+ Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
+ return Priority;
+}
+
void CodeGenModule::emitMultiVersionFunctions() {
for (GlobalDecl GD : MultiVersionFuncs) {
- SmallVector<CodeGenFunction::TargetMultiVersionResolverOption, 10> Options;
+ SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
getContext().forEachMultiversionedFunctionVersion(
FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
@@ -2424,20 +2515,36 @@ void CodeGenModule::emitMultiVersionFunctions() {
}
assert(Func && "This should have just been created");
}
- Options.emplace_back(getTarget(), cast<llvm::Function>(Func),
- CurFD->getAttr<TargetAttr>()->parse());
+
+ const auto *TA = CurFD->getAttr<TargetAttr>();
+ llvm::SmallVector<StringRef, 8> Feats;
+ TA->getAddedFeatures(Feats);
+
+ Options.emplace_back(cast<llvm::Function>(Func),
+ TA->getArchitecture(), Feats);
});
- llvm::Function *ResolverFunc = cast<llvm::Function>(
- GetGlobalValue((getMangledName(GD) + ".resolver").str()));
+ llvm::Function *ResolverFunc;
+ const TargetInfo &TI = getTarget();
+
+ if (TI.supportsIFunc() || FD->isTargetMultiVersion())
+ ResolverFunc = cast<llvm::Function>(
+ GetGlobalValue((getMangledName(GD) + ".resolver").str()));
+ else
+ ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
+
if (supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
+
std::stable_sort(
Options.begin(), Options.end(),
- std::greater<CodeGenFunction::TargetMultiVersionResolverOption>());
+ [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
+ const CodeGenFunction::MultiVersionResolverOption &RHS) {
+ return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
+ });
CodeGenFunction CGF(*this);
- CGF.EmitTargetMultiVersionResolver(ResolverFunc, Options);
+ CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
}
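
The options are ordered so the most specific version is tested first; a minimal sketch of that stable sort by a derived priority key (the priority table below is invented — Clang consults TargetInfo::multiVersionSortPriority):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

struct Option { std::string Feature; };

static unsigned priority(const std::string &Feat) {
  if (Feat == "avx512f") return 3;   // illustrative values only
  if (Feat == "avx2")    return 2;
  if (Feat == "sse4.2")  return 1;
  return 0;                          // "default"
}

int main() {
  std::vector<Option> Opts = {{"sse4.2"}, {"avx512f"}, {"default"}, {"avx2"}};
  std::stable_sort(Opts.begin(), Opts.end(),
                   [](const Option &L, const Option &R) {
                     return priority(L.Feature) > priority(R.Feature);
                   });
  for (const Option &O : Opts)
    std::printf("%s\n", O.Feature.c_str()); // "default" ends up last
}
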
@@ -2446,27 +2553,58 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD && "Not a FunctionDecl?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- llvm::Type *DeclTy = getTypes().ConvertTypeForMem(FD->getType());
+ QualType CanonTy = Context.getCanonicalType(FD->getType());
+ llvm::Type *DeclTy = getTypes().ConvertFunctionType(CanonTy, FD);
+
+ if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
+ DeclTy = getTypes().GetFunctionType(FInfo);
+ }
StringRef ResolverName = getMangledName(GD);
- llvm::Type *ResolverType = llvm::FunctionType::get(
- llvm::PointerType::get(DeclTy,
- Context.getTargetAddressSpace(FD->getType())),
- false);
- auto *ResolverFunc = cast<llvm::Function>(
- GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
- /*ForVTable=*/false));
-
- SmallVector<CodeGenFunction::CPUDispatchMultiVersionResolverOption, 10>
- Options;
+
+ llvm::Type *ResolverType;
+ GlobalDecl ResolverGD;
+ if (getTarget().supportsIFunc())
+ ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(DeclTy,
+ Context.getTargetAddressSpace(FD->getType())),
+ false);
+ else {
+ ResolverType = DeclTy;
+ ResolverGD = GD;
+ }
+
+ auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
+ ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
+
+ SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const TargetInfo &Target = getTarget();
+ unsigned Index = 0;
for (const IdentifierInfo *II : DD->cpus()) {
// Get the name of the target function so we can look it up/create it.
std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
getCPUSpecificMangling(*this, II->getName());
- llvm::Constant *Func = GetOrCreateLLVMFunction(
- MangledName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/false,
- /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
+
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+
+ if (!Func) {
+ GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
+ if (ExistingDecl.getDecl() &&
+ ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
+ EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ if (!ExistingDecl.getDecl())
+ ExistingDecl = GD.getWithMultiVersionIndex(Index);
+
+ Func = GetOrCreateLLVMFunction(
+ MangledName, DeclTy, ExistingDecl,
+ /*ForVTable=*/false, /*DontDefer=*/true,
+ /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
+ }
+ }
+
llvm::SmallVector<StringRef, 32> Features;
Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
llvm::transform(Features, Features.begin(),
@@ -2475,27 +2613,54 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
Features.begin(), Features.end(), [&Target](StringRef Feat) {
return !Target.validateCpuSupports(Feat);
}), Features.end());
- Options.emplace_back(cast<llvm::Function>(Func),
- CodeGenFunction::GetX86CpuSupportsMask(Features));
+ Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
+ ++Index;
}
llvm::sort(
- Options.begin(), Options.end(),
- std::greater<CodeGenFunction::CPUDispatchMultiVersionResolverOption>());
+ Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
+ const CodeGenFunction::MultiVersionResolverOption &RHS) {
+ return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
+ CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
+ });
+
+ // If the list contains multiple 'default' versions, such as when it contains
+ // both 'pentium' and 'generic', don't emit the call to the generic one (since
+ // we always run on at least a 'pentium'). We do this by deleting the 'least
+ // advanced' one (that is, the one with the lowest mangling letter).
+ while (Options.size() > 1 &&
+ CodeGenFunction::GetX86CpuSupportsMask(
+ (Options.end() - 2)->Conditions.Features) == 0) {
+ StringRef LHSName = (Options.end() - 2)->Function->getName();
+ StringRef RHSName = (Options.end() - 1)->Function->getName();
+ if (LHSName.compare(RHSName) < 0)
+ Options.erase(Options.end() - 2);
+ else
+ Options.erase(Options.end() - 1);
+ }
+
CodeGenFunction CGF(*this);
- CGF.EmitCPUDispatchMultiVersionResolver(ResolverFunc, Options);
+ CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
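
A hedged example of the cpu_specific/cpu_dispatch attributes this resolver serves (x86 only; the CPU names are illustrative). Listing both 'generic' and 'pentium' yields two zero-mask options, and the dedup loop above keeps only one of them.

__attribute__((cpu_specific(generic)))   int work(void) { return 0; }
__attribute__((cpu_specific(pentium)))   int work(void) { return 1; }
__attribute__((cpu_specific(ivybridge))) int work(void) { return 2; }

__attribute__((cpu_dispatch(generic, pentium, ivybridge))) int work(void);

int main(void) { return work(); }
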
-/// If an ifunc for the specified mangled name is not in the module, create and
-/// return an llvm IFunc Function with the specified type.
-llvm::Constant *
-CodeGenModule::GetOrCreateMultiVersionIFunc(GlobalDecl GD, llvm::Type *DeclTy,
- const FunctionDecl *FD) {
+/// If a dispatcher for the specified mangled name is not in the module, create
+/// and return an llvm Function with the specified type.
+llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
+ GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
std::string MangledName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
- std::string IFuncName = MangledName + ".ifunc";
- if (llvm::GlobalValue *IFuncGV = GetGlobalValue(IFuncName))
- return IFuncGV;
+
+ // Holds the name of the resolver; in IFunc mode this is the IFunc itself,
+ // which has a separate resolver function.
+ std::string ResolverName = MangledName;
+ if (getTarget().supportsIFunc())
+ ResolverName += ".ifunc";
+ else if (FD->isTargetMultiVersion())
+ ResolverName += ".resolver";
+
+ // If this already exists, just return that one.
+ if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
+ return ResolverGV;
// Since this is the first time we've created this IFunc, make sure
// that we put this multiversioned function into the list to be
@@ -2503,20 +2668,28 @@ CodeGenModule::GetOrCreateMultiVersionIFunc(GlobalDecl GD, llvm::Type *DeclTy,
if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
MultiVersionFuncs.push_back(GD);
- std::string ResolverName = MangledName + ".resolver";
- llvm::Type *ResolverType = llvm::FunctionType::get(
- llvm::PointerType::get(DeclTy,
- Context.getTargetAddressSpace(FD->getType())),
- false);
- llvm::Constant *Resolver =
- GetOrCreateLLVMFunction(ResolverName, ResolverType, GlobalDecl{},
- /*ForVTable=*/false);
- llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
- DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
- GIF->setName(IFuncName);
- SetCommonAttributes(FD, GIF);
+ if (getTarget().supportsIFunc()) {
+ llvm::Type *ResolverType = llvm::FunctionType::get(
+ llvm::PointerType::get(
+ DeclTy, getContext().getTargetAddressSpace(FD->getType())),
+ false);
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ MangledName + ".resolver", ResolverType, GlobalDecl{},
+ /*ForVTable=*/false);
+ llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
+ DeclTy, 0, llvm::Function::ExternalLinkage, "", Resolver, &getModule());
+ GIF->setName(ResolverName);
+ SetCommonAttributes(FD, GIF);
- return GIF;
+ return GIF;
+ }
+
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
+ assert(isa<llvm::GlobalValue>(Resolver) &&
+ "Resolver should be created for the first time");
+ SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
+ return Resolver;
}
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
@@ -2539,15 +2712,16 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
!OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
!DontDefer && !IsForDefinition) {
- const FunctionDecl *FDDef = FD->getDefinition();
- GlobalDecl GDDef;
- if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
- GDDef = GlobalDecl(CD, GD.getCtorType());
- else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
- GDDef = GlobalDecl(DD, GD.getDtorType());
- else
- GDDef = GlobalDecl(FDDef);
- addDeferredDeclToEmit(GDDef);
+ if (const FunctionDecl *FDDef = FD->getDefinition()) {
+ GlobalDecl GDDef;
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
+ GDDef = GlobalDecl(CD, GD.getCtorType());
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
+ GDDef = GlobalDecl(DD, GD.getDtorType());
+ else
+ GDDef = GlobalDecl(FDDef);
+ EmitGlobal(GDDef);
+ }
}
if (FD->isMultiVersion()) {
@@ -2555,7 +2729,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
if (TA && TA->isDefaultVersion())
UpdateMultiVersionNames(GD, FD);
if (!IsForDefinition)
- return GetOrCreateMultiVersionIFunc(GD, Ty, FD);
+ return GetOrCreateMultiVersionResolver(GD, Ty, FD);
}
}
@@ -3058,10 +3232,9 @@ CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
IsForDefinition);
}
-llvm::GlobalVariable *
-CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
- llvm::Type *Ty,
- llvm::GlobalValue::LinkageTypes Linkage) {
+llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
+ StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
+ unsigned Alignment) {
llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
llvm::GlobalVariable *OldGV = nullptr;
@@ -3097,6 +3270,8 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
!GV->hasAvailableExternallyLinkage())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+ GV->setAlignment(Alignment);
+
return GV;
}
@@ -3313,8 +3488,15 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// CUDA E.2.4.1 "__shared__ variables cannot have an initialization
// as part of their declaration." Sema has already checked for
// error cases, so we just need to set Init to UndefValue.
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
- D->hasAttr<CUDASharedAttr>())
+ bool IsCUDASharedVar =
+ getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
+ // Shadows of initialized device-side global variables are also left
+ // undefined.
+ bool IsCUDAShadowVar =
+ !getLangOpts().CUDAIsDevice &&
+ (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
+ D->hasAttr<CUDASharedAttr>());
+ if (getLangOpts().CUDA && (IsCUDASharedVar || IsCUDAShadowVar))
Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
@@ -3434,7 +3616,10 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
Flags |= CGCUDARuntime::ExternDeviceVar;
if (D->hasAttr<CUDAConstantAttr>())
Flags |= CGCUDARuntime::ConstantDeviceVar;
- getCUDARuntime().registerDeviceVar(*GV, Flags);
+ // Extern global variables will be registered in the TU where they are
+ // defined.
+ if (!D->hasExternalStorage())
+ getCUDARuntime().registerDeviceVar(*GV, Flags);
} else if (D->hasAttr<CUDASharedAttr>())
// __shared__ variables are odd. Shadows do get created, but
// they are not registered with the CUDA runtime, so they
@@ -3577,6 +3762,15 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
}
}
+ // Microsoft's link.exe doesn't support alignments greater than 32 for common
+ // symbols, so symbols with greater alignment requirements cannot be common.
+ // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
+ // alignments for common symbols via the aligncomm directive, so this
+ // restriction only applies to MSVC environments.
+ if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
+ Context.getTypeAlignIfKnown(D->getType()) > 32)
+ return true;
+
return false;
}
@@ -3592,6 +3786,10 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
return llvm::GlobalVariable::WeakAnyLinkage;
}
+ if (const auto *FD = D->getAsFunction())
+ if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
+ return llvm::GlobalVariable::LinkOnceAnyLinkage;
+
// We are guaranteed to have a strong definition somewhere else,
// so we can use available_externally linkage.
if (Linkage == GVA_AvailableExternally)
@@ -3828,15 +4026,6 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
AddGlobalDtor(Fn, DA->getPriority());
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, Fn);
-
- if (D->isCPUSpecificMultiVersion()) {
- auto *Spec = D->getAttr<CPUSpecificAttr>();
- // If there is another specific version we need to emit, do so here.
- if (Spec->ActiveArgIndex + 1 < Spec->cpus_size()) {
- ++Spec->ActiveArgIndex;
- EmitGlobalFunctionDefinition(GD, nullptr);
- }
- }
}
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -4030,39 +4219,81 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
+ const ASTContext &Context = getContext();
+ const llvm::Triple &Triple = getTriple();
+
+ const auto CFRuntime = getLangOpts().CFRuntime;
+ const bool IsSwiftABI =
+ static_cast<unsigned>(CFRuntime) >=
+ static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
+ const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;
+
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
+ const char *CFConstantStringClassName = "__CFConstantStringClassReference";
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::GlobalValue *GV = cast<llvm::GlobalValue>(
- CreateRuntimeVariable(Ty, "__CFConstantStringClassReference"));
-
- if (getTriple().isOSBinFormatCOFF()) {
- IdentifierInfo &II = getContext().Idents.get(GV->getName());
- TranslationUnitDecl *TUDecl = getContext().getTranslationUnitDecl();
- DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
-
- const VarDecl *VD = nullptr;
- for (const auto &Result : DC->lookup(&II))
- if ((VD = dyn_cast<VarDecl>(Result)))
- break;
-
- if (!VD || !VD->hasAttr<DLLExportAttr>()) {
- GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
- } else {
- GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
- GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+
+ switch (CFRuntime) {
+ default: break;
+ case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
+ case LangOptions::CoreFoundationABI::Swift5_0:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
+ : "$s10Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ case LangOptions::CoreFoundationABI::Swift4_2:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
+ : "$S10Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ case LangOptions::CoreFoundationABI::Swift4_1:
+ CFConstantStringClassName =
+ Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
+ : "__T010Foundation19_NSCFConstantStringCN";
+ Ty = IntPtrTy;
+ break;
+ }
+
+ llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);
+
+ if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
+ llvm::GlobalValue *GV = nullptr;
+
+ if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
+ IdentifierInfo &II = Context.Idents.get(GV->getName());
+ TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
+ DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
+
+ const VarDecl *VD = nullptr;
+ for (const auto &Result : DC->lookup(&II))
+ if ((VD = dyn_cast<VarDecl>(Result)))
+ break;
+
+ if (Triple.isOSBinFormatELF()) {
+ if (!VD)
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ if (!VD || !VD->hasAttr<DLLExportAttr>())
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ else
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ }
+
+ setDSOLocal(GV);
}
}
- setDSOLocal(GV);
// Decay array -> ptr
CFConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
+ IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
+ : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
}
- QualType CFTy = getContext().getCFConstantStringType();
+ QualType CFTy = Context.getCFConstantStringType();
auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
@@ -4073,7 +4304,12 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
// Flags.
- Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
+ if (IsSwiftABI) {
+ Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
+ Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
+ } else {
+ Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
+ }
// String pointer.
llvm::Constant *C = nullptr;
@@ -4094,17 +4330,20 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
- CharUnits Align = isUTF16
- ? getContext().getTypeAlignInChars(getContext().ShortTy)
- : getContext().getTypeAlignInChars(getContext().CharTy);
+ CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
+ : Context.getTypeAlignInChars(Context.CharTy);
GV->setAlignment(Align.getQuantity());
// FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
// Without it LLVM can merge the string with a non unnamed_addr one during
// LTO. Doing that changes the section it ends in, which surprises ld64.
- if (getTriple().isOSBinFormatMachO())
+ if (Triple.isOSBinFormatMachO())
GV->setSection(isUTF16 ? "__TEXT,__ustring"
: "__TEXT,__cstring,cstring_literals");
+ // Make sure the literal ends up in .rodata to allow for safe ICF and for
+ // the static linker to adjust permissions to read-only later on.
+ else if (Triple.isOSBinFormatELF())
+ GV->setSection(".rodata");
// String.
llvm::Constant *Str =
@@ -4116,8 +4355,17 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields.add(Str);
// String length.
- auto Ty = getTypes().ConvertType(getContext().LongTy);
- Fields.addInt(cast<llvm::IntegerType>(Ty), StringLength);
+ llvm::IntegerType *LengthTy =
+ llvm::IntegerType::get(getModule().getContext(),
+ Context.getTargetInfo().getLongWidth());
+ if (IsSwiftABI) {
+ if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
+ CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
+ LengthTy = Int32Ty;
+ else
+ LengthTy = IntPtrTy;
+ }
+ Fields.addInt(LengthTy, StringLength);
CharUnits Alignment = getPointerAlign();
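A minimal, self-contained sketch of the length-width selection in the hunk above; the CFABI enum and cfStringLengthBits helper below are illustrative stand-ins (not Clang code) for LangOptions::CoreFoundationABI and the Int32Ty/IntPtrTy/long-width choice. The Swift 4.1 and 4.2 CoreFoundation runtimes get a 32-bit length field, Swift 5.0 and later get a pointer-sized one, and the plain Objective-C CF runtime keeps the width of 'long'.

    // Illustrative only -- mirrors the LengthTy selection above.
    #include <cstdio>

    enum class CFABI { ObjectiveC, Swift4_1, Swift4_2, Swift5_0, Swift };

    unsigned cfStringLengthBits(CFABI ABI, unsigned PointerBits, unsigned LongBits) {
      switch (ABI) {
      case CFABI::Swift4_1:
      case CFABI::Swift4_2:
        return 32;          // Int32Ty in the patch
      case CFABI::Swift5_0:
      case CFABI::Swift:
        return PointerBits; // IntPtrTy in the patch
      case CFABI::ObjectiveC:
        return LongBits;    // Context.getTargetInfo().getLongWidth()
      }
      return LongBits;
    }

    int main() {
      std::printf("%u %u\n", cfStringLengthBits(CFABI::Swift4_2, 64, 64),
                  cfStringLengthBits(CFABI::Swift, 64, 64)); // prints "32 64"
    }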
@@ -4125,7 +4373,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
/*isConstant=*/false,
llvm::GlobalVariable::PrivateLinkage);
- switch (getTriple().getObjectFormat()) {
+ switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
case llvm::Triple::COFF:
@@ -4264,15 +4512,13 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef GlobalVariableName;
llvm::GlobalValue::LinkageTypes LT;
- // Mangle the string literal if the ABI allows for it. However, we cannot
- // do this if we are compiling with ASan or -fwritable-strings because they
- // rely on strings having normal linkage.
- if (!LangOpts.WritableStrings &&
- !LangOpts.Sanitize.has(SanitizerKind::Address) &&
- getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
+ // Mangle the string literal if that's how the ABI merges duplicate strings.
+ // Don't do it if they are writable, since we don't want writes in one TU to
+ // affect strings in another.
+ if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
+ !LangOpts.WritableStrings) {
llvm::raw_svector_ostream Out(MangledNameBuffer);
getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
-
LT = llvm::GlobalValue::LinkOnceODRLinkage;
GlobalVariableName = MangledNameBuffer;
} else {
@@ -4620,6 +4866,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::TypeAliasTemplate:
case Decl::Block:
case Decl::Empty:
+ case Decl::Binding:
break;
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -4787,6 +5034,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
break;
+ case Decl::OMPRequires:
+ EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
+ break;
+
default:
// Make sure we handled everything we should, every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
@@ -4810,7 +5061,7 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
return;
SourceManager &SM = getContext().getSourceManager();
- if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getLocStart()))
+ if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
return;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
@@ -4981,6 +5232,16 @@ void CodeGenModule::EmitVersionIdentMetadata() {
IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}
+void CodeGenModule::EmitCommandLineMetadata() {
+ llvm::NamedMDNode *CommandLineMetadata =
+ TheModule.getOrInsertNamedMetadata("llvm.commandline");
+ std::string CommandLine = getCodeGenOpts().RecordCommandLine;
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+
+ llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
+ CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
+}
+
void CodeGenModule::EmitTargetMetadata() {
// Warning, new MangledDeclNames may be appended within this loop.
// We rely on MapVector insertions adding new elements to the end
@@ -5073,7 +5334,7 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
- VD, Addr, RefExpr->getLocStart(), PerformInit))
+ VD, Addr, RefExpr->getBeginLoc(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
}
}
@@ -5196,8 +5457,9 @@ TargetAttr::ParsedTargetAttr CodeGenModule::filterFunctionTargetAttrs(const Targ
// Fills in the supplied string map with the set of target features for the
// passed in function.
void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
- const FunctionDecl *FD) {
+ GlobalDecl GD) {
StringRef TargetCPU = Target.getTargetOpts().CPU;
+ const FunctionDecl *FD = GD.getDecl()->getAsFunction();
if (const auto *TD = FD->getAttr<TargetAttr>()) {
TargetAttr::ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
@@ -5219,8 +5481,8 @@ void CodeGenModule::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
ParsedAttr.Features);
} else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
llvm::SmallVector<StringRef, 32> FeaturesTmp;
- Target.getCPUSpecificCPUDispatchFeatures(SD->getCurCPUName()->getName(),
- FeaturesTmp);
+ Target.getCPUSpecificCPUDispatchFeatures(
+ SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
Target.initFeatureMap(FeatureMap, getDiags(), TargetCPU, Features);
} else {
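The new Decl::Binding case in EmitTopLevelDecl above covers C++17 structured bindings declared at namespace scope: each binding reaches codegen as a top-level BindingDecl that needs no separate emission because the enclosing DecompositionDecl owns the storage. A small program that produces such declarations (plain C++17, independent of this patch):

    #include <utility>

    // 'first' and 'second' are top-level BindingDecls; the unnamed
    // DecompositionDecl they belong to is what gets a global variable.
    auto [first, second] = std::make_pair(1, 2);

    int main() { return first + second == 3 ? 0 : 1; }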
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 91f3d94330f1..75679d11c13c 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -119,15 +119,29 @@ struct OrderGlobalInits {
struct ObjCEntrypoints {
ObjCEntrypoints() { memset(this, 0, sizeof(*this)); }
- /// void objc_autoreleasePoolPop(void*);
+ /// id objc_alloc(id);
+ llvm::Constant *objc_alloc;
+
+ /// id objc_allocWithZone(id);
+ llvm::Constant *objc_allocWithZone;
+
+ /// void objc_autoreleasePoolPop(void*);
llvm::Constant *objc_autoreleasePoolPop;
+ /// void objc_autoreleasePoolPop(void*);
+ /// Note: this method is used when we are using exception handling.
+ llvm::Constant *objc_autoreleasePoolPopInvoke;
+
/// void *objc_autoreleasePoolPush(void);
llvm::Constant *objc_autoreleasePoolPush;
/// id objc_autorelease(id);
llvm::Constant *objc_autorelease;
+ /// id objc_autorelease(id);
+ /// Note: this is the runtime method, not the intrinsic.
+ llvm::Constant *objc_autoreleaseRuntimeFunction;
+
/// id objc_autoreleaseReturnValue(id);
llvm::Constant *objc_autoreleaseReturnValue;
@@ -152,6 +166,10 @@ struct ObjCEntrypoints {
/// id objc_retain(id);
llvm::Constant *objc_retain;
+ /// id objc_retain(id);
+ /// Note: this is the runtime method, not the intrinsic.
+ llvm::Constant *objc_retainRuntimeFunction;
+
/// id objc_retainAutorelease(id);
llvm::Constant *objc_retainAutorelease;
@@ -167,6 +185,10 @@ struct ObjCEntrypoints {
/// void objc_release(id);
llvm::Constant *objc_release;
+ /// void objc_release(id);
+ /// Note: this is the runtime method, not the intrinsic.
+ llvm::Constant *objc_releaseRuntimeFunction;
+
/// void objc_storeStrong(id*, id);
llvm::Constant *objc_storeStrong;
@@ -764,7 +786,8 @@ public:
/// bitcast to the new variable.
llvm::GlobalVariable *
CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
- llvm::GlobalValue::LinkageTypes Linkage);
+ llvm::GlobalValue::LinkageTypes Linkage,
+ unsigned Alignment);
llvm::Function *
CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name,
@@ -1042,8 +1065,7 @@ public:
const CGFunctionInfo &FI);
/// Set the LLVM function attributes (sext, zext, etc).
- void SetLLVMFunctionAttributes(const Decl *D,
- const CGFunctionInfo &Info,
+ void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info,
llvm::Function *F);
/// Set the LLVM function attributes which only apply to a function
@@ -1103,8 +1125,7 @@ public:
// Fills in the supplied string map with the set of target features for the
// passed in function.
- void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
- const FunctionDecl *FD);
+ void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, GlobalDecl GD);
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
@@ -1223,6 +1244,10 @@ public:
void EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
CodeGenFunction *CGF = nullptr);
+ /// Emit code for the requires directive.
+ /// \param D Requires declaration
+ void EmitOMPRequiresDecl(const OMPRequiresDecl *D);
+
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
@@ -1288,9 +1313,9 @@ private:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
- llvm::Constant *GetOrCreateMultiVersionIFunc(GlobalDecl GD,
- llvm::Type *DeclTy,
- const FunctionDecl *FD);
+ llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
+ llvm::Type *DeclTy,
+ const FunctionDecl *FD);
void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
@@ -1299,7 +1324,7 @@ private:
ForDefinition_t IsForDefinition
= NotForDefinition);
- bool GetCPUAndFeaturesAttributes(const Decl *D,
+ bool GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &AttrBuilder);
void setNonAliasAttributes(GlobalDecl GD, llvm::GlobalObject *GO);
@@ -1310,6 +1335,8 @@ private:
void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr);
void EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
+ void EmitMultiVersionFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV);
+
void EmitGlobalVarDefinition(const VarDecl *D, bool IsTentative = false);
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
@@ -1397,6 +1424,9 @@ private:
/// Emit the Clang version as llvm.ident metadata.
void EmitVersionIdentMetadata();
+ /// Emit the Clang commandline as llvm.commandline metadata.
+ void EmitCommandLineMetadata();
+
/// Emits target specific Metadata for global declarations.
void EmitTargetMetadata();
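EmitOMPRequiresDecl, declared above, is the codegen hook for the OpenMP 5.0 'requires' directive that the matching Decl::OMPRequires case dispatches to. A minimal translation unit that produces an OMPRequiresDecl; the set of accepted clauses and any offloading flags depend on the OpenMP support available, so treat this as a sketch (compile with -fopenmp):

    // OpenMP 5.0: declare a unified-shared-memory requirement for the whole TU.
    #pragma omp requires unified_shared_memory

    int main() {
      int x = 0;
    #pragma omp target map(tofrom : x) // device code may rely on the requirement
      { x = 42; }
      return x == 42 ? 0 : 1;
    }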
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index c8c2a1b956b8..776060743a63 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -165,7 +165,12 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
bool TraverseBlockExpr(BlockExpr *BE) { return true; }
- bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
+ bool TraverseLambdaExpr(LambdaExpr *LE) {
+ // Traverse the captures, but not the body.
+ for (const auto &C : zip(LE->captures(), LE->capture_inits()))
+ TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
+ return true;
+ }
bool TraverseCapturedStmt(CapturedStmt *CS) { return true; }
bool VisitDecl(const Decl *D) {
@@ -544,6 +549,8 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
RecordStmtCount(S);
+ if (S->getInit())
+ Visit(S->getInit());
Visit(S->getLoopVarStmt());
Visit(S->getRangeStmt());
Visit(S->getBeginStmt());
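The added Visit(S->getInit()) above (and the matching change in CoverageMappingGen.cpp later in this patch) accounts for the C++20 range-based for with an init-statement, whose init was previously skipped when assigning counters. For reference, the construct looks like this (compile with -std=c++2a/-std=c++20):

    #include <cstdio>
    #include <vector>

    int main() {
      // The init-statement 'v{1, 2, 3}' is part of the CXXForRangeStmt and is
      // now visited when region counters are assigned.
      for (std::vector<int> v{1, 2, 3}; int x : v)
        std::printf("%d\n", x);
    }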
@@ -815,7 +822,7 @@ bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) {
// Don't map the functions in system headers.
const auto &SM = CGM.getContext().getSourceManager();
- auto Loc = D->getBody()->getLocStart();
+ auto Loc = D->getBody()->getBeginLoc();
return SM.isInSystemHeader(Loc);
}
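The TraverseLambdaExpr override above counts a lambda's capture initializers in the enclosing function while leaving the body to be instrumented as its own function. A small example of the split (plain C++14):

    int work() { return 1; }

    int main() {
      // 'work()' runs here, so the call belongs to main's counters; the lambda
      // body gets its own counters when the lambda's operator() is emitted.
      auto fn = [n = work()] { return n + 1; };
      return fn() == 2 ? 0 : 1;
    }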
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index 0759e65388b8..120ab651a4a8 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -17,7 +17,6 @@
#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include <array>
#include <memory>
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index ec48231e5247..27d39716d22f 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 1a1395e6ae74..2acf1ac16180 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -503,6 +503,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 626869f00021..8e344e91b8cd 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -17,7 +17,6 @@
#include "CGCall.h"
#include "clang/Basic/ABI.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/Sema/Sema.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Module.h"
diff --git a/lib/CodeGen/ConstantEmitter.h b/lib/CodeGen/ConstantEmitter.h
index b4d1b65743c7..7ad8e5d37cd1 100644
--- a/lib/CodeGen/ConstantEmitter.h
+++ b/lib/CodeGen/ConstantEmitter.h
@@ -38,6 +38,9 @@ private:
/// Whether the constant-emission failed.
bool Failed = false;
+ /// Whether we're in a constant context.
+ bool InConstantContext = false;
+
/// The AST address space where this (non-abstract) initializer is going.
/// Used for generating appropriate placeholders.
LangAS DestAddressSpace;
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index 2d8446463594..35962c73d9a8 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -67,7 +67,7 @@ public:
void setStartLoc(SourceLocation Loc) { LocStart = Loc; }
- SourceLocation getStartLoc() const {
+ SourceLocation getBeginLoc() const {
assert(LocStart && "Region has no start location");
return *LocStart;
}
@@ -116,7 +116,7 @@ struct SpellingRegion {
}
SpellingRegion(SourceManager &SM, SourceMappingRegion &R)
- : SpellingRegion(SM, R.getStartLoc(), R.getEndLoc()) {}
+ : SpellingRegion(SM, R.getBeginLoc(), R.getEndLoc()) {}
/// Check if the start and end locations appear in source order, i.e
/// top->bottom, left->right.
@@ -204,7 +204,7 @@ public:
/// Get the start of \c S ignoring macro arguments and builtin macros.
SourceLocation getStart(const Stmt *S) {
- SourceLocation Loc = S->getLocStart();
+ SourceLocation Loc = S->getBeginLoc();
while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
return Loc;
@@ -212,7 +212,7 @@ public:
/// Get the end of \c S ignoring macro arguments and builtin macros.
SourceLocation getEnd(const Stmt *S) {
- SourceLocation Loc = S->getLocEnd();
+ SourceLocation Loc = S->getEndLoc();
while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
return getPreciseTokenLocEnd(Loc);
@@ -229,7 +229,7 @@ public:
llvm::SmallSet<FileID, 8> Visited;
SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs;
for (const auto &Region : SourceRegions) {
- SourceLocation Loc = Region.getStartLoc();
+ SourceLocation Loc = Region.getBeginLoc();
FileID File = SM.getFileID(Loc);
if (!Visited.insert(File).second)
continue;
@@ -311,7 +311,7 @@ public:
for (const auto &Region : SourceRegions) {
assert(Region.hasEndLoc() && "incomplete region");
- SourceLocation LocStart = Region.getStartLoc();
+ SourceLocation LocStart = Region.getBeginLoc();
assert(SM.getFileID(LocStart).isValid() && "region in invalid file");
// Ignore regions from system headers.
@@ -502,7 +502,7 @@ struct CounterCoverageMappingBuilder
DeferredRegion = None;
// If the region ends in an expansion, find the expansion site.
- FileID StartFile = SM.getFileID(DR.getStartLoc());
+ FileID StartFile = SM.getFileID(DR.getBeginLoc());
if (SM.getFileID(DeferredEndLoc) != StartFile) {
if (isNestedIn(DeferredEndLoc, StartFile)) {
do {
@@ -515,12 +515,12 @@ struct CounterCoverageMappingBuilder
// The parent of this deferred region ends where the containing decl ends,
// so the region isn't useful.
- if (DR.getStartLoc() == DeferredEndLoc)
+ if (DR.getBeginLoc() == DeferredEndLoc)
return Index;
// If we're visiting statements in non-source order (e.g switch cases or
// a loop condition) we can't construct a sensible deferred region.
- if (!SpellingRegion(SM, DR.getStartLoc(), DeferredEndLoc).isInSourceOrder())
+ if (!SpellingRegion(SM, DR.getBeginLoc(), DeferredEndLoc).isInSourceOrder())
return Index;
DR.setGap(true);
@@ -552,6 +552,15 @@ struct CounterCoverageMappingBuilder
completeDeferred(Count, DeferredEndLoc);
}
+ size_t locationDepth(SourceLocation Loc) {
+ size_t Depth = 0;
+ while (Loc.isValid()) {
+ Loc = getIncludeOrExpansionLoc(Loc);
+ Depth++;
+ }
+ return Depth;
+ }
+
/// Pop regions from the stack into the function's list of regions.
///
/// Adds all regions from \c ParentIndex to the top of the stack to the
@@ -562,23 +571,45 @@ struct CounterCoverageMappingBuilder
while (RegionStack.size() > ParentIndex) {
SourceMappingRegion &Region = RegionStack.back();
if (Region.hasStartLoc()) {
- SourceLocation StartLoc = Region.getStartLoc();
+ SourceLocation StartLoc = Region.getBeginLoc();
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
: RegionStack[ParentIndex].getEndLoc();
+ size_t StartDepth = locationDepth(StartLoc);
+ size_t EndDepth = locationDepth(EndLoc);
while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) {
- // The region ends in a nested file or macro expansion. Create a
- // separate region for each expansion.
- SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
- assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
-
- if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
- SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
-
- EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
- if (EndLoc.isInvalid())
- llvm::report_fatal_error("File exit not handled before popRegions");
+ bool UnnestStart = StartDepth >= EndDepth;
+ bool UnnestEnd = EndDepth >= StartDepth;
+ if (UnnestEnd) {
+ // The region ends in a nested file or macro expansion. Create a
+ // separate region for each expansion.
+ SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
+ assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
+
+ if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
+ SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
+
+ EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
+ if (EndLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
+ EndDepth--;
+ }
+ if (UnnestStart) {
+ // The region begins in a nested file or macro expansion. Create a
+ // separate region for each expansion.
+ SourceLocation NestedLoc = getEndOfFileOrMacro(StartLoc);
+ assert(SM.isWrittenInSameFile(StartLoc, NestedLoc));
+
+ if (!isRegionAlreadyAdded(StartLoc, NestedLoc))
+ SourceRegions.emplace_back(Region.getCounter(), StartLoc, NestedLoc);
+
+ StartLoc = getIncludeOrExpansionLoc(StartLoc);
+ if (StartLoc.isInvalid())
+ llvm::report_fatal_error("File exit not handled before popRegions");
+ StartDepth--;
+ }
}
+ Region.setStartLoc(StartLoc);
Region.setEndLoc(EndLoc);
MostRecentLocation = EndLoc;
@@ -588,7 +619,7 @@ struct CounterCoverageMappingBuilder
EndLoc == getEndOfFileOrMacro(EndLoc))
MostRecentLocation = getIncludeOrExpansionLoc(EndLoc);
- assert(SM.isWrittenInSameFile(Region.getStartLoc(), EndLoc));
+ assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc));
assert(SpellingRegion(SM, Region).isInSourceOrder());
SourceRegions.push_back(Region);
@@ -625,18 +656,21 @@ struct CounterCoverageMappingBuilder
return RegionStack.back();
}
- /// Propagate counts through the children of \c S.
- Counter propagateCounts(Counter TopCount, const Stmt *S) {
+ /// Propagate counts through the children of \p S if \p VisitChildren is true.
+ /// Otherwise, only emit a count for \p S itself.
+ Counter propagateCounts(Counter TopCount, const Stmt *S,
+ bool VisitChildren = true) {
SourceLocation StartLoc = getStart(S);
SourceLocation EndLoc = getEnd(S);
size_t Index = pushRegion(TopCount, StartLoc, EndLoc);
- Visit(S);
+ if (VisitChildren)
+ Visit(S);
Counter ExitCount = getRegion().getCounter();
popRegions(Index);
// The statement may be spanned by an expansion. Make sure we handle a file
// exit out of this expansion before moving to the next statement.
- if (SM.isBeforeInTranslationUnit(StartLoc, S->getLocStart()))
+ if (SM.isBeforeInTranslationUnit(StartLoc, S->getBeginLoc()))
MostRecentLocation = EndLoc;
return ExitCount;
@@ -648,7 +682,7 @@ struct CounterCoverageMappingBuilder
return SourceRegions.rend() !=
std::find_if(SourceRegions.rbegin(), SourceRegions.rend(),
[&](const SourceMappingRegion &Region) {
- return Region.getStartLoc() == StartLoc &&
+ return Region.getBeginLoc() == StartLoc &&
Region.getEndLoc() == EndLoc;
});
}
@@ -700,7 +734,7 @@ struct CounterCoverageMappingBuilder
for (SourceMappingRegion &I : llvm::reverse(RegionStack)) {
if (!I.hasStartLoc())
continue;
- SourceLocation Loc = I.getStartLoc();
+ SourceLocation Loc = I.getBeginLoc();
if (!isNestedIn(Loc, ParentFile)) {
ParentCounter = I.getCounter();
break;
@@ -826,7 +860,7 @@ struct CounterCoverageMappingBuilder
}
void VisitStmt(const Stmt *S) {
- if (S->getLocStart().isValid())
+ if (S->getBeginLoc().isValid())
extendRegion(S);
for (const Stmt *Child : S->children())
if (Child)
@@ -843,7 +877,16 @@ struct CounterCoverageMappingBuilder
if (Body && SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
return;
- propagateCounts(getRegionCounter(Body), Body);
+ // Do not visit the artificial child nodes of defaulted methods. The
+ // lexer may not be able to report back precise token end locations for
+ // these child nodes (llvm.org/PR39822), and moreover users will not be
+ // able to see coverage for them.
+ bool Defaulted = false;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(D))
+ Defaulted = Method->isDefaulted();
+
+ propagateCounts(getRegionCounter(Body), Body,
+ /*VisitChildren=*/!Defaulted);
assert(RegionStack.empty() && "Regions entered but never exited");
// Discard the last uncompleted deferred region in a decl, if one exists.
@@ -1004,6 +1047,8 @@ struct CounterCoverageMappingBuilder
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
extendRegion(S);
+ if (S->getInit())
+ Visit(S->getInit());
Visit(S->getLoopVarStmt());
Visit(S->getRangeStmt());
@@ -1109,7 +1154,7 @@ struct CounterCoverageMappingBuilder
Counter Count = addCounters(Parent.getCounter(), getRegionCounter(S));
// Reuse the existing region if it starts at our label. This is typical of
// the first case in a switch.
- if (Parent.hasStartLoc() && Parent.getStartLoc() == getStart(S))
+ if (Parent.hasStartLoc() && Parent.getBeginLoc() == getStart(S))
Parent.setCounter(Count);
else
pushRegion(Count, getStart(S));
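The Defaulted check above skips the compiler-generated bodies of '= default' members, since per llvm.org/PR39822 the lexer cannot report precise end locations for their artificial child nodes and there is nothing user-written to show coverage for. The kind of declaration it applies to, as a standalone example:

    #include <string>

    struct Named {
      std::string name;
      Named() = default;
      Named(const Named &) = default; // non-trivial but defaulted: it is emitted,
                                      // yet its body has no user-written tokens
    };

    int main() {
      Named a;
      Named b(a);
      return b.name.empty() ? 0 : 1;
    }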
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
index b08ad896d7a5..c62db096952a 100644
--- a/lib/CodeGen/CoverageMappingGen.h
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -16,7 +16,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/PPCallbacks.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/GlobalValue.h"
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 00fff144b597..b53304528c3d 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -287,6 +287,7 @@ public:
void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
+ bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
bool ReturnAdjustment) override {
@@ -634,7 +635,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
if (ShouldEmitCFICheck) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getLocStart());
+ CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
@@ -1562,9 +1563,8 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
Type != Dtor_Base && DD->isVirtual())
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
else
- Callee =
- CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
- DD);
+ Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)), GD);
CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
This.getPointer(), VTT, VTTTy,
@@ -1598,12 +1598,6 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
// Set the right visibility.
CGM.setGVProperties(VTable, RD);
- // Use pointer alignment for the vtable. Otherwise we would align them based
- // on the size of the initializer which doesn't make sense as only single
- // values are read.
- unsigned PAlign = CGM.getTarget().getPointerAlign(0);
- VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());
-
// If this is the magic class __cxxabiv1::__fundamental_type_info,
// we will emit the typeinfo for the fundamental types. This is the
// same behaviour as GCC.
@@ -1703,8 +1697,14 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CGM.getItaniumVTableContext().getVTableLayout(RD);
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
+ // Use pointer alignment for the vtable. Otherwise we would align them based
+ // on the size of the initializer which doesn't make sense as only single
+ // values are read.
+ unsigned PAlign = CGM.getTarget().getPointerAlign(0);
+
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
- Name, VTableType, llvm::GlobalValue::ExternalLinkage);
+ Name, VTableType, llvm::GlobalValue::ExternalLinkage,
+ getContext().toCharUnitsFromBits(PAlign).getQuantity());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.setGVProperties(VTable, RD);
@@ -1750,7 +1750,7 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = VFuncLoad;
}
- CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
@@ -1778,7 +1778,8 @@ void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}
-bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
+ const CXXRecordDecl *RD) const {
// We don't emit available_externally vtables if we are in -fapple-kext mode
// because kext mode does not permit devirtualization.
if (CGM.getLangOpts().AppleKext)
@@ -1796,7 +1797,43 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
// to emit an available_externally copy of vtable.
// FIXME we can still emit a copy of the vtable if we
// can emit definition of the inline functions.
- return !hasAnyUnusedVirtualInlineFunction(RD);
+ if (hasAnyUnusedVirtualInlineFunction(RD))
+ return false;
+
+ // For a class with virtual bases, we must also be able to speculatively
+ // emit the VTT, because CodeGen doesn't have separate notions of "can emit
+ // the vtable" and "can emit the VTT". For a base subobject, this means we
+ // need to be able to emit non-virtual base vtables.
+ if (RD->getNumVBases()) {
+ for (const auto &B : RD->bases()) {
+ auto *BRD = B.getType()->getAsCXXRecordDecl();
+ assert(BRD && "no class for base specifier");
+ if (B.isVirtual() || !BRD->isDynamicClass())
+ continue;
+ if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
+ if (!canSpeculativelyEmitVTableAsBaseClass(RD))
+ return false;
+
+ // For a complete-object vtable (or more specifically, for the VTT), we need
+ // to be able to speculatively emit the vtables of all dynamic virtual bases.
+ for (const auto &B : RD->vbases()) {
+ auto *BRD = B.getType()->getAsCXXRecordDecl();
+ assert(BRD && "no class for base specifier");
+ if (!BRD->isDynamicClass())
+ continue;
+ if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
+ return false;
+ }
+
+ return true;
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
@@ -1916,7 +1953,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
// Handle the array cookie specially in ASan.
if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
(expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
- CGM.getCodeGenOpts().SanitizeAddressPoisonClassMemberArrayNewCookie)) {
+ CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
// The store to the CookiePtr does not need to be instrumented.
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
llvm::FunctionType *FTy =
@@ -2315,11 +2352,13 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
SourceLocation());
ASTContext &Ctx = getContext();
+ QualType ReturnTy = Ctx.VoidTy;
+ QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- &Ctx.Idents.get(GlobalInitFnName), Ctx.VoidTy, nullptr, SC_Static,
+ &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
false, false);
- CGF.StartFunction(GlobalDecl(FD), getContext().VoidTy, GlobalInitFn,
+ CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
getTypes().arrangeNullaryFunction(), FunctionArgList(),
SourceLocation(), SourceLocation());
@@ -2342,6 +2381,9 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
const VarDecl &D,
llvm::Constant *dtor,
llvm::Constant *addr) {
+ if (D.isNoDestroy(CGM.getContext()))
+ return;
+
// Use __cxa_atexit if available.
if (CGM.getCodeGenOpts().CXAAtExit)
return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
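The isNoDestroy check above (and the matching one added to MicrosoftCXXABI.cpp further down) turns registerGlobalDtor into a no-op for variables that opt out of destruction, so no __cxa_atexit/atexit registration is emitted for them. A sketch using the corresponding Clang attribute; the same effect applies TU-wide under -fno-c++-static-destructors:

    #include <cstdio>

    struct Logger {
      ~Logger() { std::puts("flush"); }
    };

    // With the attribute, no exit-time destructor is registered for g_log,
    // so "flush" is never printed when the program exits.
    [[clang::no_destroy]] Logger g_log;

    int main() { return 0; }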
@@ -2415,7 +2457,7 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
WrapperName.str(), &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(nullptr, FI, Wrapper);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
if (VD->hasDefinition())
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
@@ -2469,8 +2511,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
CharUnits GuardAlign = CharUnits::One();
Guard->setAlignment(GuardAlign.getQuantity());
- CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, OrderedInits,
- Address(Guard, GuardAlign));
+ CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
+ InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
// On Darwin platforms, use CXX_FAST_TLS calling convention.
if (CGM.getTarget().getTriple().isOSDarwin()) {
InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -2522,7 +2564,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::GlobalVariable::ExternalWeakLinkage,
InitFnName.str(), &CGM.getModule());
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- CGM.SetLLVMFunctionAttributes(nullptr, FI, cast<llvm::Function>(Init));
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
+ cast<llvm::Function>(Init));
}
if (Init) {
@@ -2722,9 +2765,10 @@ llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
// get the mangled name of the type.
llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
Name.substr(4));
+ auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
- llvm::GlobalVariable *GV =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+ llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
+ Name, Init->getType(), Linkage, Align.getQuantity());
GV->setInitializer(Init);
@@ -2808,6 +2852,9 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
@@ -3084,7 +3131,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
}
assert(isa<ObjCInterfaceType>(Ty));
- // Fall through.
+ LLVM_FALLTHROUGH;
case Type::ObjCInterface:
if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
@@ -3363,6 +3410,10 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(M.getOrInsertComdat(GV->getName()));
+ CharUnits Align =
+ CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
+ GV->setAlignment(Align.getQuantity());
+
// The Itanium ABI specifies that type_info objects must be globally
// unique, with one exception: if the type is an incomplete class
// type or a (possibly indirect) pointer to one. That exception
@@ -4017,7 +4068,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
switch (CatchType.getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
CastExn = CGF.EmitARCRetainNonBlock(CastExn);
- // fallthrough
+ LLVM_FALLTHROUGH;
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -4146,7 +4197,7 @@ void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
// Emit the local.
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
- InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
+ InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
CGF.EmitAutoVarCleanups(var);
}
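The recursion added to canSpeculativelyEmitVTable above reflects that emitting an available_externally vtable for a class with virtual bases also requires emitting its VTT, and therefore the construction vtables of its non-virtual dynamic bases. A self-contained illustration of the class shape involved (in real code the virtual functions would typically be defined in another TU, which is what makes speculative emission interesting):

    #include <cstdio>

    struct VBase { virtual ~VBase() = default; };
    struct NonVirt { virtual void f() { std::puts("NonVirt"); } };

    // Derived has a virtual base: speculatively emitting its complete-object
    // vtable also means emitting the VTT, which references vtables for the
    // NonVirt base subobject and for VBase.
    struct Derived : NonVirt, virtual VBase {
      void f() override { std::puts("Derived"); }
    };

    int main() {
      Derived d;
      static_cast<NonVirt &>(d).f(); // dispatches through the base subobject
      return 0;
    }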
diff --git a/lib/CodeGen/MacroPPCallbacks.cpp b/lib/CodeGen/MacroPPCallbacks.cpp
index 48dea7d54b1e..013ca15e2391 100644
--- a/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/lib/CodeGen/MacroPPCallbacks.cpp
@@ -14,7 +14,8 @@
#include "MacroPPCallbacks.h"
#include "CGDebugInfo.h"
#include "clang/CodeGen/ModuleBuilder.h"
-#include "clang/Parse/Parser.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
using namespace clang;
@@ -88,16 +89,6 @@ SourceLocation MacroPPCallbacks::getCorrectLocation(SourceLocation Loc) {
return SourceLocation();
}
-static bool isBuiltinFile(SourceManager &SM, SourceLocation Loc) {
- StringRef Filename(SM.getPresumedLoc(Loc).getFilename());
- return Filename.equals("<built-in>");
-}
-
-static bool isCommandLineFile(SourceManager &SM, SourceLocation Loc) {
- StringRef Filename(SM.getPresumedLoc(Loc).getFilename());
- return Filename.equals("<command line>");
-}
-
void MacroPPCallbacks::updateStatusToNextScope() {
switch (Status) {
case NoScope:
@@ -127,7 +118,7 @@ void MacroPPCallbacks::FileEntered(SourceLocation Loc) {
updateStatusToNextScope();
return;
case BuiltinScope:
- if (isCommandLineFile(PP.getSourceManager(), Loc))
+ if (PP.getSourceManager().isWrittenInCommandLineFile(Loc))
return;
updateStatusToNextScope();
LLVM_FALLTHROUGH;
@@ -147,7 +138,7 @@ void MacroPPCallbacks::FileExited(SourceLocation Loc) {
default:
llvm_unreachable("Do not expect to exit a file from current scope");
case BuiltinScope:
- if (!isBuiltinFile(PP.getSourceManager(), Loc))
+ if (!PP.getSourceManager().isWrittenInBuiltinFile(Loc))
// Skip next scope and change status to MainFileScope.
Status = MainFileScope;
return;
diff --git a/lib/CodeGen/MacroPPCallbacks.h b/lib/CodeGen/MacroPPCallbacks.h
index 48c67e2d36ad..b87a4005d481 100644
--- a/lib/CodeGen/MacroPPCallbacks.h
+++ b/lib/CodeGen/MacroPPCallbacks.h
@@ -11,6 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LIB_CODEGEN_MACROPPCALLBACKS_H
+#define LLVM_CLANG_LIB_CODEGEN_MACROPPCALLBACKS_H
+
#include "clang/Lex/PPCallbacks.h"
namespace llvm {
@@ -116,3 +119,5 @@ public:
};
} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 059adb78ca30..5545bc6647e6 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -1552,9 +1552,9 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
if (Type == Dtor_Complete && DD->getParent()->getNumVBases() == 0)
Type = Dtor_Base;
- CGCallee Callee = CGCallee::forDirect(
- CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
- DD);
+ CGCallee Callee =
+ CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
+ GlobalDecl(DD, Type));
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
@@ -1872,7 +1872,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
- CGCallee Callee(MethodDecl->getCanonicalDecl(), VFunc);
+ CGCallee Callee(GD, VFunc);
return Callee;
}
@@ -2024,8 +2024,10 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
assert(!CGM.getModule().getNamedGlobal(Name) &&
"vbtable with this name already exists: mangling bug?");
- llvm::GlobalVariable *GV =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, VBTableType, Linkage);
+ CharUnits Alignment =
+ CGM.getContext().getTypeAlignInChars(CGM.getContext().IntTy);
+ llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
+ Name, VBTableType, Linkage, Alignment.getQuantity());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
if (RD->hasAttr<DLLImportAttr>())
@@ -2240,6 +2242,9 @@ static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Dtor,
llvm::Constant *Addr) {
+ if (D.isNoDestroy(CGM.getContext()))
+ return;
+
if (D.getTLSKind())
return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);
@@ -3924,7 +3929,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CallArgList Args;
// Push the this ptr.
- Args.add(RValue::get(This), CD->getThisType(getContext()));
+ Args.add(RValue::get(This), CD->getThisType());
// Push the src ptr.
if (SrcVal)
@@ -3951,7 +3956,8 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
// Call the destructor with our arguments.
llvm::Constant *CalleePtr =
CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
- CGCallee Callee = CGCallee::forDirect(CalleePtr, CD);
+ CGCallee Callee =
+ CGCallee::forDirect(CalleePtr, GlobalDecl(CD, Ctor_Complete));
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
Args, CD, Ctor_Complete, ExtraArgs.Prefix, ExtraArgs.Suffix);
CGF.EmitCall(CalleeInfo, Callee, ReturnValueSlot(), Args);
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 8aa9bfb421b4..c0a37698e762 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -17,9 +17,9 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
@@ -64,7 +64,7 @@ namespace {
std::unique_ptr<CodeGen::CodeGenModule> Builder;
private:
- SmallVector<CXXMethodDecl *, 8> DeferredInlineMethodDefinitions;
+ SmallVector<FunctionDecl *, 8> DeferredInlineMemberFuncDefs;
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, llvm::StringRef ModuleName,
@@ -80,7 +80,7 @@ namespace {
~CodeGeneratorImpl() override {
// There should normally not be any leftover inline method definitions.
- assert(DeferredInlineMethodDefinitions.empty() ||
+ assert(DeferredInlineMemberFuncDefs.empty() ||
Diags.hasErrorOccurred());
}
@@ -132,6 +132,9 @@ namespace {
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
M->setDataLayout(Ctx->getTargetInfo().getDataLayout());
+ const auto &SDKVersion = Ctx->getTargetInfo().getSDKVersion();
+ if (!SDKVersion.empty())
+ M->setSDKVersion(SDKVersion);
Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
PreprocessorOpts, CodeGenOpts,
*M, Diags, CoverageInfo));
@@ -163,16 +166,16 @@ namespace {
}
void EmitDeferredDecls() {
- if (DeferredInlineMethodDefinitions.empty())
+ if (DeferredInlineMemberFuncDefs.empty())
return;
// Emit any deferred inline method definitions. Note that more deferred
// methods may be added during this loop, since ASTConsumer callbacks
// can be invoked if AST inspection results in declarations being added.
HandlingTopLevelDeclRAII HandlingDecl(*this);
- for (unsigned I = 0; I != DeferredInlineMethodDefinitions.size(); ++I)
- Builder->EmitTopLevelDecl(DeferredInlineMethodDefinitions[I]);
- DeferredInlineMethodDefinitions.clear();
+ for (unsigned I = 0; I != DeferredInlineMemberFuncDefs.size(); ++I)
+ Builder->EmitTopLevelDecl(DeferredInlineMemberFuncDefs[I]);
+ DeferredInlineMemberFuncDefs.clear();
}
void HandleInlineFunctionDefinition(FunctionDecl *D) override {
@@ -181,17 +184,6 @@ namespace {
assert(D->doesThisDeclarationHaveABody());
- // Handle friend functions.
- if (D->isInIdentifierNamespace(Decl::IDNS_OrdinaryFriend)) {
- if (Ctx->getTargetInfo().getCXXABI().isMicrosoft()
- && !D->getLexicalDeclContext()->isDependentContext())
- Builder->EmitTopLevelDecl(D);
- return;
- }
-
- // Otherwise, must be a method.
- auto MD = cast<CXXMethodDecl>(D);
-
// We may want to emit this definition. However, that decision might be
// based on computing the linkage, and we have to defer that in case we
// are inside of something that will change the method's final linkage,
@@ -200,13 +192,13 @@ namespace {
// void bar();
// void foo() { bar(); }
// } A;
- DeferredInlineMethodDefinitions.push_back(MD);
+ DeferredInlineMemberFuncDefs.push_back(D);
// Provide some coverage mapping even for methods that aren't emitted.
// Don't do this for templated classes though, as they may not be
// instantiable.
- if (!MD->getParent()->isDependentContext())
- Builder->AddDeferredUnusedCoverageMapping(MD);
+ if (!D->getLexicalDeclContext()->isDependentContext())
+ Builder->AddDeferredUnusedCoverageMapping(D);
}
/// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
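DeferredInlineMemberFuncDefs holds FunctionDecls rather than CXXMethodDecls because an inline friend defined inside a class reaches HandleInlineFunctionDefinition too, yet is not a member function; with the special case removed above, it is now deferred like any inline method. For example:

    #include <cstdio>

    struct Widget {
      int w = 6, h = 7;
      // An inline friend definition: a FunctionDecl, not a CXXMethodDecl.
      friend int area(const Widget &x) { return x.w * x.h; }
    };

    int main() {
      Widget box;
      std::printf("%d\n", area(box)); // found only by argument-dependent lookup
      return 0;
    }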
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index c164cec5d942..6f00c836f93d 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -14,14 +14,13 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/BackendUtil.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
@@ -156,6 +155,8 @@ public:
LangOpts.CurrentModule.empty() ? MainFileName : LangOpts.CurrentModule;
CodeGenOpts.setDebugInfo(codegenoptions::FullDebugInfo);
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
+ CodeGenOpts.DebugPrefixMap =
+ CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
}
~PCHContainerGenerator() override = default;
diff --git a/lib/CodeGen/SwiftCallingConv.cpp b/lib/CodeGen/SwiftCallingConv.cpp
index b411a501ea81..75a0fa5ce189 100644
--- a/lib/CodeGen/SwiftCallingConv.cpp
+++ b/lib/CodeGen/SwiftCallingConv.cpp
@@ -415,6 +415,40 @@ static bool areBytesInSameUnit(CharUnits first, CharUnits second,
== getOffsetAtStartOfUnit(second, chunkSize);
}
+static bool isMergeableEntryType(llvm::Type *type) {
+ // Opaquely-typed memory is always mergeable.
+ if (type == nullptr) return true;
+
+ // Pointers and integers are always mergeable. In theory we should not
+ // merge pointers, but (1) it doesn't currently matter in practice because
+ // the chunk size is never greater than the size of a pointer and (2)
+ // Swift IRGen uses integer types for a lot of things that are "really"
+ // just storing pointers (like Optional<SomePointer>). If we ever have a
+ // target that would otherwise combine pointers, we should put some effort
+ // into fixing those cases in Swift IRGen and then call out pointer types
+ // here.
+
+ // Floating-point and vector types should never be merged.
+ // Most such types are too large and highly-aligned to ever trigger merging
+ // in practice, but it's important for the rule to cover at least 'half'
+ // and 'float', as well as things like small vectors of 'i1' or 'i8'.
+ return (!type->isFloatingPointTy() && !type->isVectorTy());
+}
+
+bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
+ const StorageEntry &second,
+ CharUnits chunkSize) {
+ // Only merge entries that overlap the same chunk. We test this first
+ // despite being a bit more expensive because this is the condition that
+ // tends to prevent merging.
+ if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
+ chunkSize))
+ return false;
+
+ return (isMergeableEntryType(first.Type) &&
+ isMergeableEntryType(second.Type));
+}
+
void SwiftAggLowering::finish() {
if (Entries.empty()) {
Finished = true;
@@ -425,12 +459,12 @@ void SwiftAggLowering::finish() {
// which is generally the size of a pointer.
const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);
- // First pass: if two entries share a chunk, make them both opaque
+ // First pass: if two entries should be merged, make them both opaque
// and stretch one to meet the next.
+ // Also, remember if there are any opaque entries.
bool hasOpaqueEntries = (Entries[0].Type == nullptr);
for (size_t i = 1, e = Entries.size(); i != e; ++i) {
- if (areBytesInSameUnit(Entries[i - 1].End - CharUnits::One(),
- Entries[i].Begin, chunkSize)) {
+ if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
Entries[i - 1].Type = nullptr;
Entries[i].Type = nullptr;
Entries[i - 1].End = Entries[i].Begin;
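A compact restatement of the merge rule introduced above, with a plain enum standing in for llvm::Type (illustrative only): two adjacent entries are merged only when they share a chunk and neither is a floating-point or vector entry, so small 'half'/'float' and short vector fields keep their type through the lowering.

    #include <cstdio>

    enum class EntryKind { Opaque, Integer, Pointer, Float, Vector };

    // Mirrors isMergeableEntryType(): opaque, integer and pointer entries may be
    // collapsed into one opaque chunk; float and vector entries never are.
    bool mergeableKind(EntryKind K) {
      return K != EntryKind::Float && K != EntryKind::Vector;
    }

    bool shouldMerge(EntryKind A, EntryKind B, bool SameChunk) {
      return SameChunk && mergeableKind(A) && mergeableKind(B);
    }

    int main() {
      std::printf("%d %d\n",
                  shouldMerge(EntryKind::Integer, EntryKind::Pointer, true),  // 1
                  shouldMerge(EntryKind::Integer, EntryKind::Float, true));   // 0
    }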
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 6f6c5f50c2e7..89ec73670a73 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -19,9 +19,9 @@
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
@@ -720,10 +720,12 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//
-class WebAssemblyABIInfo final : public DefaultABIInfo {
+class WebAssemblyABIInfo final : public SwiftABIInfo {
+ DefaultABIInfo defaultInfo;
+
public:
explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
- : DefaultABIInfo(CGT) {}
+ : SwiftABIInfo(CGT), defaultInfo(CGT) {}
private:
ABIArgInfo classifyReturnType(QualType RetTy) const;
@@ -741,6 +743,15 @@ private:
Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const override;
+
+ bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
+ bool asReturnValue) const override {
+ return occupiesMoreThan(CGT, scalars, /*total*/ 4);
+ }
+
+ bool isSwiftErrorInRegister() const override {
+ return false;
+ }
};
class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
@@ -778,7 +789,7 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
}
// Otherwise just do the default thing.
- return DefaultABIInfo::classifyArgumentType(Ty);
+ return defaultInfo.classifyArgumentType(Ty);
}
ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
@@ -798,7 +809,7 @@ ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
}
// Otherwise just do the default thing.
- return DefaultABIInfo::classifyReturnType(RetTy);
+ return defaultInfo.classifyReturnType(RetTy);
}
Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -2337,7 +2348,7 @@ static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
bool Quote = (Lib.find(" ") != StringRef::npos);
std::string ArgStr = Quote ? "\"" : "";
ArgStr += Lib;
- if (!Lib.endswith_lower(".lib"))
+ if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
ArgStr += ".lib";
ArgStr += Quote ? "\"" : "";
return ArgStr;
@@ -3944,18 +3955,39 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
}
- // Bool type is always extended to the ABI, other builtin types are not
- // extended.
- const BuiltinType *BT = Ty->getAs<BuiltinType>();
- if (BT && BT->getKind() == BuiltinType::Bool)
- return ABIArgInfo::getExtend(Ty);
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ // Bool type is always extended to the ABI, other builtin types are not
+ // extended.
+ return ABIArgInfo::getExtend(Ty);
- // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
- // passes them indirectly through memory.
- if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::x87DoubleExtended())
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ case BuiltinType::LongDouble:
+ // Mingw64 GCC uses the old 80 bit extended precision floating point
+ // unit. It passes them indirectly through memory.
+ if (IsMingw64) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::x87DoubleExtended())
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ break;
+
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ // If it's a parameter type, the normal ABI rule is that arguments larger
+ // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
+ // even though it isn't particularly efficient.
+ if (!IsReturnType)
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
+ // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
+ // Clang matches GCC here for compatibility.
+ return ABIArgInfo::getDirect(
+ llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ default:
+ break;
+ }
}
return ABIArgInfo::getDirect();
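The new Int128/UInt128 cases above follow MinGW GCC: an __int128 argument is wider than 8 bytes and is therefore passed indirectly, while an __int128 return value is coerced to a v2i64 so it comes back in XMM0. At the source level nothing changes; __int128 is a compiler extension available on x86_64 targets:

    // The ABI details decided above are invisible in the source.
    __int128 add128(__int128 a, __int128 b) { return a + b; }

    int main() {
      __int128 r = add128((__int128)1 << 70, 5);
      return (int)(r & 0xff) == 5 ? 0 : 1;
    }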
@@ -4969,6 +5001,31 @@ public:
}
bool doesReturnSlotInterfereWithArgs() const override { return false; }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ auto Kind = CGM.getCodeGenOpts().getSignReturnAddress();
+ if (Kind != CodeGenOptions::SignReturnAddressScope::None) {
+ Fn->addFnAttr("sign-return-address",
+ Kind == CodeGenOptions::SignReturnAddressScope::All
+ ? "all"
+ : "non-leaf");
+
+ auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
+ Fn->addFnAttr("sign-return-address-key",
+ Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
+ ? "a_key"
+ : "b_key");
+ }
+
+ if (CGM.getCodeGenOpts().BranchTargetEnforcement)
+ Fn->addFnAttr("branch-target-enforcement");
+ }
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
@@ -4976,6 +5033,9 @@ public:
WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
: AArch64TargetCodeGenInfo(CGT, K) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
@@ -4986,6 +5046,14 @@ public:
Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
}
};
+
+void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
}
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
@@ -5532,6 +5600,9 @@ public:
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
+ ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
+ uint64_t Members) const;
+ ABIArgInfo coerceIllegalVector(QualType Ty) const;
bool isIllegalVectorType(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
@@ -5706,6 +5777,41 @@ void ARMABIInfo::setCCs() {
RuntimeCC = abiCC;
}
+ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64 || Size == 128) {
+ llvm::Type *ResType = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
+ const Type *Base,
+ uint64_t Members) const {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ if (const VectorType *VT = Base->getAs<VectorType>()) {
+ // FP16 vectors should be converted to integer vectors
+ if (!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType())) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ llvm::Type *NewVecTy = llvm::VectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
+}
+
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
bool isVariadic) const {
// 6.1.2.1 The following argument types are VFP CPRCs:
@@ -5720,25 +5826,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
- if (isIllegalVectorType(Ty)) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 32) {
- llvm::Type *ResType =
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64) {
- llvm::Type *ResType = llvm::VectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), 2);
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 128) {
- llvm::Type *ResType = llvm::VectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), 4);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- }
+ if (isIllegalVectorType(Ty))
+ return coerceIllegalVector(Ty);
// _Float16 and __fp16 get passed as if it were an int or float, but with
// the top 16 bits unspecified. This is not done for OpenCL as it handles the
@@ -5774,11 +5863,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
// into VFP registers.
const Type *Base = nullptr;
uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members)) {
- assert(Base && "Base class should be set for homogeneous aggregate");
- // Base can be a floating-point or a vector.
- return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
- }
+ if (isHomogeneousAggregate(Ty, Base, Members))
+ return classifyHomogeneousAggregate(Ty, Base, Members);
} else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
// WatchOS does have homogeneous aggregates. Note that we intentionally use
// this convention even for a variadic function: the backend will use GPRs
@@ -5937,9 +6023,15 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- // Large vector types should be returned via memory.
- if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
- return getNaturalAlignIndirect(RetTy);
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // Large vector types should be returned via memory.
+ if (getContext().getTypeSize(RetTy) > 128)
+ return getNaturalAlignIndirect(RetTy);
+ // FP16 vectors should be converted to integer vectors
+ if (!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType()))
+ return coerceIllegalVector(RetTy);
}
// _Float16 and __fp16 get returned as if it were an int or float, but with
@@ -5999,11 +6091,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
if (IsEffectivelyAAPCS_VFP) {
const Type *Base = nullptr;
uint64_t Members = 0;
- if (isHomogeneousAggregate(RetTy, Base, Members)) {
- assert(Base && "Base class should be set for homogeneous aggregate");
- // Homogeneous Aggregates are returned directly.
- return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
- }
+ if (isHomogeneousAggregate(RetTy, Base, Members))
+ return classifyHomogeneousAggregate(RetTy, Base, Members);
}
// Aggregates <= 4 bytes are returned in r0; other aggregates
@@ -6038,6 +6127,13 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On targets that don't support FP16, FP16 is expanded into float, and we
+ // don't want the ABI to depend on whether or not FP16 is supported in
+ // hardware. Thus return true to coerce FP16 vectors into integer vectors.
+ if (!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType()))
+ return true;
if (isAndroid()) {
// Android shipped using Clang 3.1, which supported a slightly different
// vector ABI. The primary differences were that 3-element vector types
@@ -8164,6 +8260,137 @@ SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+// ARC ABI implementation.
+namespace {
+
+class ARCABIInfo : public DefaultABIInfo {
+public:
+ using DefaultABIInfo::DefaultABIInfo;
+
+private:
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
+ if (!State.FreeRegs)
+ return;
+ if (Info.isIndirect() && Info.getInReg())
+ State.FreeRegs--;
+ else if (Info.isDirect() && Info.getInReg()) {
+ unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (sz < State.FreeRegs)
+ State.FreeRegs -= sz;
+ else
+ State.FreeRegs = 0;
+ }
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ CCState State(FI.getCallingConvention());
+ // ARC uses 8 registers to pass arguments.
+ State.FreeRegs = 8;
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ updateState(FI.getReturnInfo(), FI.getReturnType(), State);
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type, State.FreeRegs);
+ updateState(I.info, I.type, State);
+ }
+ }
+
+ ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
+ ABIArgInfo getIndirectByValue(QualType Ty) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+};
+
+class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARCTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
+};
+
+ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
+ return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
+ getNaturalAlignIndirect(Ty, false);
+}
+
+ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
+ // Compute the byval alignment.
+ const unsigned MinABIStackAlignInBytes = 4;
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+ TypeAlign > MinABIStackAlignInBytes);
+}
+
+Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), true);
+}
+
+ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
+ uint8_t FreeRegs) const {
+ // Handle the generic C++ ABI.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect)
+ return getIndirectByRef(Ty, FreeRegs > 0);
+
+ if (RAA == CGCXXABI::RAA_DirectInMemory)
+ return getIndirectByValue(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectByValue(Ty);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+
+ return FreeRegs >= SizeInRegs ?
+ ABIArgInfo::getDirectInReg(Result) :
+ ABIArgInfo::getDirect(Result, 0, nullptr, false);
+ }
+
+ return Ty->isPromotableIntegerType() ?
+ (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
+ ABIArgInfo::getExtend(Ty)) :
+ (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
+ ABIArgInfo::getDirect());
+}
+
+ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirectInReg();
+
+ // Return values larger than 4 registers are returned indirectly.
+ auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
+ if (RetSize > 4)
+ return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+} // End anonymous namespace.
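
A toy model of the ARC register accounting above, using plain integers rather than ABIArgInfo; all names here are illustrative:

#include <cstdint>

struct ToyCCState { unsigned FreeRegs = 8; }; // ARC passes arguments in 8 regs

static unsigned regsFor(uint64_t SizeInBits) {
  return (SizeInBits + 31) / 32; // round up to whole 32-bit registers
}

// Returns true when the argument fits in the remaining registers, mirroring
// the FreeRegs >= SizeInRegs checks above; otherwise the pool is treated as
// exhausted, as updateState does.
static bool takeRegs(ToyCCState &S, uint64_t SizeInBits) {
  unsigned N = regsFor(SizeInBits);
  if (S.FreeRegs < N) {
    S.FreeRegs = 0;
    return false;
  }
  S.FreeRegs -= N;
  return true;
}
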
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
@@ -8553,7 +8780,7 @@ static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
// The ABI requires unions to be sorted but not structures.
// See FieldEncoding::operator< for sort algorithm.
if (RT->isUnionType())
- llvm::sort(FE.begin(), FE.end());
+ llvm::sort(FE);
// We can now complete the TypeString.
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
@@ -8597,7 +8824,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
EnumEnc += '}';
FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
}
- llvm::sort(FE.begin(), FE.end());
+ llvm::sort(FE);
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
if (I)
@@ -9185,6 +9412,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
case llvm::Triple::xcore:
return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
+ case llvm::Triple::arc:
+ return SetCGInfo(new ARCTargetCodeGenInfo(Types));
case llvm::Triple::spir:
case llvm::Triple::spir64:
return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
diff --git a/lib/CodeGen/VarBypassDetector.cpp b/lib/CodeGen/VarBypassDetector.cpp
index 2f8a591a3e7f..859cdd4282cc 100644
--- a/lib/CodeGen/VarBypassDetector.cpp
+++ b/lib/CodeGen/VarBypassDetector.cpp
@@ -78,7 +78,7 @@ bool VarBypassDetector::BuildScopeInformation(const Stmt *S,
return false;
++StmtsToSkip;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case Stmt::GotoStmtClass:
FromScopes.push_back({S, ParentScope});
diff --git a/lib/CodeGen/VarBypassDetector.h b/lib/CodeGen/VarBypassDetector.h
index f50baf4bab9f..47fe13cfacd6 100644
--- a/lib/CodeGen/VarBypassDetector.h
+++ b/lib/CodeGen/VarBypassDetector.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_VARBYPASSDETECTOR_H
#define LLVM_CLANG_LIB_CODEGEN_VARBYPASSDETECTOR_H
+#include "clang/AST/Decl.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
diff --git a/lib/CrossTU/CrossTranslationUnit.cpp b/lib/CrossTU/CrossTranslationUnit.cpp
index e20ea7702237..7c97beb498a5 100644
--- a/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/lib/CrossTU/CrossTranslationUnit.cpp
@@ -17,10 +17,10 @@
#include "clang/CrossTU/CrossTUDiagnostic.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
@@ -32,6 +32,47 @@ namespace clang {
namespace cross_tu {
namespace {
+
+#define DEBUG_TYPE "CrossTranslationUnit"
+STATISTIC(NumGetCTUCalled, "The # of getCTUDefinition function calls");
+STATISTIC(
+ NumNotInOtherTU,
+ "The # of getCTUDefinition calls where the function was not found in any other TU");
+STATISTIC(NumGetCTUSuccess,
+ "The # of getCTUDefinition calls that successfully returned the "
+ "requested function's body");
+STATISTIC(NumTripleMismatch, "The # of triple mismatches");
+STATISTIC(NumLangMismatch, "The # of language mismatches");
+
+// Same as Triple's equality operator, but we check a field only if that is
+// known in both instances.
+bool hasEqualKnownFields(const llvm::Triple &Lhs, const llvm::Triple &Rhs) {
+ using llvm::Triple;
+ if (Lhs.getArch() != Triple::UnknownArch &&
+ Rhs.getArch() != Triple::UnknownArch && Lhs.getArch() != Rhs.getArch())
+ return false;
+ if (Lhs.getSubArch() != Triple::NoSubArch &&
+ Rhs.getSubArch() != Triple::NoSubArch &&
+ Lhs.getSubArch() != Rhs.getSubArch())
+ return false;
+ if (Lhs.getVendor() != Triple::UnknownVendor &&
+ Rhs.getVendor() != Triple::UnknownVendor &&
+ Lhs.getVendor() != Rhs.getVendor())
+ return false;
+ if (!Lhs.isOSUnknown() && !Rhs.isOSUnknown() &&
+ Lhs.getOS() != Rhs.getOS())
+ return false;
+ if (Lhs.getEnvironment() != Triple::UnknownEnvironment &&
+ Rhs.getEnvironment() != Triple::UnknownEnvironment &&
+ Lhs.getEnvironment() != Rhs.getEnvironment())
+ return false;
+ if (Lhs.getObjectFormat() != Triple::UnknownObjectFormat &&
+ Rhs.getObjectFormat() != Triple::UnknownObjectFormat &&
+ Lhs.getObjectFormat() != Rhs.getObjectFormat())
+ return false;
+ return true;
+}
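
A usage sketch for hasEqualKnownFields (triples chosen for illustration, assuming the helper is visible in this file): strict Triple equality fails as soon as one side leaves a component unknown, while the helper only compares components both sides actually specify.

#include "llvm/ADT/Triple.h"
#include <cassert>

static void tripleCompatSketch() {
  llvm::Triple Full("x86_64-pc-linux-gnu");
  llvm::Triple Partial("x86_64--linux-gnu"); // vendor left unknown
  assert(!(Full == Partial));                 // operator== compares all fields
  assert(hasEqualKnownFields(Full, Partial)); // unknown vendor is skipped
}
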
+
// FIXME: This class will be removed after the transition to llvm::Error.
class IndexErrorCategory : public std::error_category {
public:
@@ -55,6 +96,10 @@ public:
return "Failed to load external AST source.";
case index_error_code::failed_to_generate_usr:
return "Failed to generate USR.";
+ case index_error_code::triple_mismatch:
+ return "Triple mismatch";
+ case index_error_code::lang_mismatch:
+ return "Language mismatch";
}
llvm_unreachable("Unrecognized index_error_code.");
}
@@ -75,26 +120,26 @@ std::error_code IndexError::convertToErrorCode() const {
llvm::Expected<llvm::StringMap<std::string>>
parseCrossTUIndex(StringRef IndexPath, StringRef CrossTUDir) {
- std::ifstream ExternalFnMapFile(IndexPath);
- if (!ExternalFnMapFile)
+ std::ifstream ExternalMapFile(IndexPath);
+ if (!ExternalMapFile)
return llvm::make_error<IndexError>(index_error_code::missing_index_file,
IndexPath.str());
llvm::StringMap<std::string> Result;
std::string Line;
unsigned LineNo = 1;
- while (std::getline(ExternalFnMapFile, Line)) {
+ while (std::getline(ExternalMapFile, Line)) {
const size_t Pos = Line.find(" ");
if (Pos > 0 && Pos != std::string::npos) {
StringRef LineRef{Line};
- StringRef FunctionLookupName = LineRef.substr(0, Pos);
- if (Result.count(FunctionLookupName))
+ StringRef LookupName = LineRef.substr(0, Pos);
+ if (Result.count(LookupName))
return llvm::make_error<IndexError>(
index_error_code::multiple_definitions, IndexPath.str(), LineNo);
StringRef FileName = LineRef.substr(Pos + 1);
SmallString<256> FilePath = CrossTUDir;
llvm::sys::path::append(FilePath, FileName);
- Result[FunctionLookupName] = FilePath.str().str();
+ Result[LookupName] = FilePath.str().str();
} else
return llvm::make_error<IndexError>(
index_error_code::invalid_index_format, IndexPath.str(), LineNo);
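
Each line of the index parsed above has the form "<lookup-name> <ast-file>". A toy restatement of the parse loop without the llvm::Error plumbing; the sample USR and names are illustrative:

#include <map>
#include <sstream>
#include <string>

// Accepts text such as "c:@F@foo bar.cpp.ast\n"; rejects malformed lines and
// duplicate lookup names, mirroring invalid_index_format and
// multiple_definitions above.
static bool parseToyIndex(const std::string &Text,
                          std::map<std::string, std::string> &Out) {
  std::istringstream In(Text);
  std::string Line;
  while (std::getline(In, Line)) {
    const size_t Pos = Line.find(' ');
    if (Pos == 0 || Pos == std::string::npos)
      return false;                                   // invalid format
    if (!Out.emplace(Line.substr(0, Pos), Line.substr(Pos + 1)).second)
      return false;                                   // multiple definitions
  }
  return true;
}
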
@@ -149,23 +194,48 @@ CrossTranslationUnitContext::findFunctionInDeclContext(const DeclContext *DC,
llvm::Expected<const FunctionDecl *>
CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
StringRef CrossTUDir,
- StringRef IndexName) {
+ StringRef IndexName,
+ bool DisplayCTUProgress) {
+ assert(FD && "FD is missing, bad call to this function!");
assert(!FD->hasBody() && "FD has a definition in current translation unit!");
+ ++NumGetCTUCalled;
const std::string LookupFnName = getLookupName(FD);
if (LookupFnName.empty())
return llvm::make_error<IndexError>(
index_error_code::failed_to_generate_usr);
llvm::Expected<ASTUnit *> ASTUnitOrError =
- loadExternalAST(LookupFnName, CrossTUDir, IndexName);
+ loadExternalAST(LookupFnName, CrossTUDir, IndexName, DisplayCTUProgress);
if (!ASTUnitOrError)
return ASTUnitOrError.takeError();
ASTUnit *Unit = *ASTUnitOrError;
- if (!Unit)
- return llvm::make_error<IndexError>(
- index_error_code::failed_to_get_external_ast);
assert(&Unit->getFileManager() ==
&Unit->getASTContext().getSourceManager().getFileManager());
+ const llvm::Triple &TripleTo = Context.getTargetInfo().getTriple();
+ const llvm::Triple &TripleFrom =
+ Unit->getASTContext().getTargetInfo().getTriple();
+ // Report an error if the imported AST was generated for a different target.
+ // Some parts of the triple in the loaded ASTContext can be unknown while the
+ // very same parts in the target ASTContext are known. Thus we check for the
+ // known parts only.
+ if (!hasEqualKnownFields(TripleTo, TripleFrom)) {
+ // TODO: Pass the SourceLocation of the CallExpression for more precise
+ // diagnostics.
+ ++NumTripleMismatch;
+ return llvm::make_error<IndexError>(index_error_code::triple_mismatch,
+ Unit->getMainFileName(), TripleTo.str(),
+ TripleFrom.str());
+ }
+
+ const auto &LangTo = Context.getLangOpts();
+ const auto &LangFrom = Unit->getASTContext().getLangOpts();
+ // FIXME: Currently we do not support CTU across C++ and C and across
+ // different dialects of C++.
+ if (LangTo.CPlusPlus != LangFrom.CPlusPlus) {
+ ++NumLangMismatch;
+ return llvm::make_error<IndexError>(index_error_code::lang_mismatch);
+ }
+
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
if (const FunctionDecl *ResultDecl =
findFunctionInDeclContext(TU, LookupFnName))
@@ -176,24 +246,29 @@ CrossTranslationUnitContext::getCrossTUDefinition(const FunctionDecl *FD,
void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
switch (IE.getCode()) {
case index_error_code::missing_index_file:
- Context.getDiagnostics().Report(diag::err_fe_error_opening)
- << IE.getFileName() << "required by the CrossTU functionality";
+ Context.getDiagnostics().Report(diag::err_ctu_error_opening)
+ << IE.getFileName();
break;
case index_error_code::invalid_index_format:
- Context.getDiagnostics().Report(diag::err_fnmap_parsing)
+ Context.getDiagnostics().Report(diag::err_extdefmap_parsing)
<< IE.getFileName() << IE.getLineNum();
break;
case index_error_code::multiple_definitions:
Context.getDiagnostics().Report(diag::err_multiple_def_index)
<< IE.getLineNum();
break;
+ case index_error_code::triple_mismatch:
+ Context.getDiagnostics().Report(diag::warn_ctu_incompat_triple)
+ << IE.getFileName() << IE.getTripleToName() << IE.getTripleFromName();
+ break;
default:
break;
}
}
llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
- StringRef LookupName, StringRef CrossTUDir, StringRef IndexName) {
+ StringRef LookupName, StringRef CrossTUDir, StringRef IndexName,
+ bool DisplayCTUProgress) {
// FIXME: The current implementation only supports loading functions with
// a lookup name from a single translation unit. If multiple
// translation units contains functions with the same lookup name an
@@ -216,8 +291,10 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
}
auto It = FunctionFileMap.find(LookupName);
- if (It == FunctionFileMap.end())
+ if (It == FunctionFileMap.end()) {
+ ++NumNotInOtherTU;
return llvm::make_error<IndexError>(index_error_code::missing_definition);
+ }
StringRef ASTFileName = It->second;
auto ASTCacheEntry = FileASTUnitMap.find(ASTFileName);
if (ASTCacheEntry == FileASTUnitMap.end()) {
@@ -233,6 +310,10 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts()));
Unit = LoadedUnit.get();
FileASTUnitMap[ASTFileName] = std::move(LoadedUnit);
+ if (DisplayCTUProgress) {
+ llvm::errs() << "CTU loaded AST file: "
+ << ASTFileName << "\n";
+ }
} else {
Unit = ASTCacheEntry->second.get();
}
@@ -240,27 +321,42 @@ llvm::Expected<ASTUnit *> CrossTranslationUnitContext::loadExternalAST(
} else {
Unit = FnUnitCacheEntry->second;
}
+ if (!Unit)
+ return llvm::make_error<IndexError>(
+ index_error_code::failed_to_get_external_ast);
return Unit;
}
llvm::Expected<const FunctionDecl *>
CrossTranslationUnitContext::importDefinition(const FunctionDecl *FD) {
+ assert(FD->hasBody() && "Functions to be imported should have body.");
+
ASTImporter &Importer = getOrCreateASTImporter(FD->getASTContext());
auto *ToDecl =
- cast<FunctionDecl>(Importer.Import(const_cast<FunctionDecl *>(FD)));
+ cast_or_null<FunctionDecl>(Importer.Import(const_cast<FunctionDecl *>(FD)));
+ if (!ToDecl)
+ return llvm::make_error<IndexError>(index_error_code::failed_import);
assert(ToDecl->hasBody());
assert(FD->hasBody() && "Functions already imported should have body.");
+ ++NumGetCTUSuccess;
return ToDecl;
}
+void CrossTranslationUnitContext::lazyInitLookupTable(
+ TranslationUnitDecl *ToTU) {
+ if (!LookupTable)
+ LookupTable = llvm::make_unique<ASTImporterLookupTable>(*ToTU);
+}
+
ASTImporter &
CrossTranslationUnitContext::getOrCreateASTImporter(ASTContext &From) {
auto I = ASTUnitImporterMap.find(From.getTranslationUnitDecl());
if (I != ASTUnitImporterMap.end())
return *I->second;
- ASTImporter *NewImporter =
- new ASTImporter(Context, Context.getSourceManager().getFileManager(),
- From, From.getSourceManager().getFileManager(), false);
+ lazyInitLookupTable(Context.getTranslationUnitDecl());
+ ASTImporter *NewImporter = new ASTImporter(
+ Context, Context.getSourceManager().getFileManager(), From,
+ From.getSourceManager().getFileManager(), false, LookupTable.get());
ASTUnitImporterMap[From.getTranslationUnitDecl()].reset(NewImporter);
return *NewImporter;
}
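
lazyInitLookupTable and getOrCreateASTImporter above share a create-on-first-use pattern; a generic stand-alone sketch of that pattern with placeholder names (not clang APIs):

#include <map>
#include <memory>
#include <utility>

template <typename Key, typename T, typename... Args>
T &getOrCreate(std::map<Key, std::unique_ptr<T>> &Cache, const Key &K,
               Args &&... MakeArgs) {
  auto It = Cache.find(K);
  if (It == Cache.end())
    It = Cache
             .emplace(K, std::unique_ptr<T>(new T(std::forward<Args>(MakeArgs)...)))
             .first;
  return *It->second; // later calls reuse the cached instance
}
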
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index 99d588d9c009..d4c7040a233c 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -26,6 +26,7 @@ const char *Action::getClassName(ActionClass AC) {
return "offload";
case PreprocessJobClass: return "preprocessor";
case PrecompileJobClass: return "precompiler";
+ case HeaderModulePrecompileJobClass: return "header-module-precompiler";
case AnalyzeJobClass: return "analyzer";
case MigrateJobClass: return "migrator";
case CompileJobClass: return "compiler";
@@ -319,6 +320,19 @@ void PrecompileJobAction::anchor() {}
PrecompileJobAction::PrecompileJobAction(Action *Input, types::ID OutputType)
: JobAction(PrecompileJobClass, Input, OutputType) {}
+PrecompileJobAction::PrecompileJobAction(ActionClass Kind, Action *Input,
+ types::ID OutputType)
+ : JobAction(Kind, Input, OutputType) {
+ assert(isa<PrecompileJobAction>((Action*)this) && "invalid action kind");
+}
+
+void HeaderModulePrecompileJobAction::anchor() {}
+
+HeaderModulePrecompileJobAction::HeaderModulePrecompileJobAction(
+ Action *Input, types::ID OutputType, const char *ModuleName)
+ : PrecompileJobAction(HeaderModulePrecompileJobClass, Input, OutputType),
+ ModuleName(ModuleName) {}
+
void AnalyzeJobAction::anchor() {}
AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
@@ -382,7 +396,7 @@ VerifyPCHJobAction::VerifyPCHJobAction(Action *Input, types::ID Type)
void OffloadBundlingJobAction::anchor() {}
OffloadBundlingJobAction::OffloadBundlingJobAction(ActionList &Inputs)
- : JobAction(OffloadBundlingJobClass, Inputs, Inputs.front()->getType()) {}
+ : JobAction(OffloadBundlingJobClass, Inputs, Inputs.back()->getType()) {}
void OffloadUnbundlingJobAction::anchor() {}
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 2b03c9f7001f..4793a1f90b2f 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -12,6 +12,7 @@ endif()
add_clang_library(clangDriver
Action.cpp
Compilation.cpp
+ DarwinSDKInfo.cpp
Distro.cpp
Driver.cpp
DriverOptions.cpp
@@ -47,17 +48,19 @@ add_clang_library(clangDriver
ToolChains/Haiku.cpp
ToolChains/HIP.cpp
ToolChains/Hexagon.cpp
+ ToolChains/Hurd.cpp
ToolChains/Linux.cpp
ToolChains/MipsLinux.cpp
ToolChains/MinGW.cpp
ToolChains/Minix.cpp
+ ToolChains/MSP430.cpp
ToolChains/MSVC.cpp
ToolChains/Myriad.cpp
ToolChains/NaCl.cpp
ToolChains/NetBSD.cpp
ToolChains/OpenBSD.cpp
ToolChains/PS4CPU.cpp
- ToolChains/RISCV.cpp
+ ToolChains/RISCVToolchain.cpp
ToolChains/Solaris.cpp
ToolChains/TCE.cpp
ToolChains/WebAssembly.cpp
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index ca2525dd07fb..982d7ecad962 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -127,7 +127,7 @@ bool Compilation::CleanupFile(const char *File, bool IssueErrors) const {
return true;
}
-bool Compilation::CleanupFileList(const ArgStringList &Files,
+bool Compilation::CleanupFileList(const llvm::opt::ArgStringList &Files,
bool IssueErrors) const {
bool Success = true;
for (const auto &File: Files)
diff --git a/lib/Driver/DarwinSDKInfo.cpp b/lib/Driver/DarwinSDKInfo.cpp
new file mode 100644
index 000000000000..547978b2f973
--- /dev/null
+++ b/lib/Driver/DarwinSDKInfo.cpp
@@ -0,0 +1,44 @@
+//===--- DarwinSDKInfo.cpp - SDK Information parser for darwin -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/DarwinSDKInfo.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang;
+
+Expected<Optional<DarwinSDKInfo>>
+driver::parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath) {
+ llvm::SmallString<256> Filepath = SDKRootPath;
+ llvm::sys::path::append(Filepath, "SDKSettings.json");
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ VFS.getBufferForFile(Filepath);
+ if (!File) {
+ // If the file couldn't be read, assume it just doesn't exist.
+ return None;
+ }
+ Expected<llvm::json::Value> Result =
+ llvm::json::parse(File.get()->getBuffer());
+ if (!Result)
+ return Result.takeError();
+
+ if (const auto *Obj = Result->getAsObject()) {
+ auto VersionString = Obj->getString("Version");
+ if (VersionString) {
+ VersionTuple Version;
+ if (!Version.tryParse(*VersionString))
+ return DarwinSDKInfo(Version);
+ }
+ }
+ return llvm::make_error<llvm::StringError>("invalid SDKSettings.json",
+ llvm::inconvertibleErrorCode());
+}
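
The parser above only consumes the top-level "Version" key of SDKSettings.json. A minimal sketch of that round trip with an illustrative JSON snippet:

#include "llvm/Support/JSON.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>

static void sdkSettingsSketch() {
  auto Parsed =
      llvm::json::parse(R"({"Version": "10.14", "CanonicalName": "macosx10.14"})");
  assert(Parsed && "well-formed JSON");
  const llvm::json::Object *Obj = Parsed->getAsObject();
  llvm::VersionTuple Version;
  // VersionTuple::tryParse returns true on failure, hence the negation.
  assert(Obj && !Version.tryParse(*Obj->getString("Version")));
  assert(Version.getMajor() == 10);
}
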
diff --git a/lib/Driver/Distro.cpp b/lib/Driver/Distro.cpp
index 2c4d44faf8d0..396d0bee5603 100644
--- a/lib/Driver/Distro.cpp
+++ b/lib/Driver/Distro.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Distro.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
@@ -17,7 +18,7 @@
using namespace clang::driver;
using namespace clang;
-static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
+static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
VFS.getBufferForFile("/etc/lsb-release");
if (File) {
@@ -50,6 +51,7 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
.Case("artful", Distro::UbuntuArtful)
.Case("bionic", Distro::UbuntuBionic)
.Case("cosmic", Distro::UbuntuCosmic)
+ .Case("disco", Distro::UbuntuDisco)
.Default(Distro::UnknownDistro);
if (Version != Distro::UnknownDistro)
return Version;
@@ -136,7 +138,10 @@ static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
if (VFS.exists("/etc/arch-release"))
return Distro::ArchLinux;
+ if (VFS.exists("/etc/gentoo-release"))
+ return Distro::Gentoo;
+
return Distro::UnknownDistro;
}
-Distro::Distro(vfs::FileSystem &VFS) : DistroVal(DetectDistro(VFS)) {}
+Distro::Distro(llvm::vfs::FileSystem &VFS) : DistroVal(DetectDistro(VFS)) {}
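
The "disco" case added above extends a StringSwitch over DISTRIB_CODENAME from /etc/lsb-release. A trimmed-down sketch of that lookup; the enum and its values are stand-ins, not the real DistroType constants:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

enum class ToyDistro { Unknown, UbuntuBionic, UbuntuCosmic, UbuntuDisco };

static ToyDistro classifyCodename(llvm::StringRef Codename) {
  // e.g. an /etc/lsb-release containing "DISTRIB_CODENAME=disco".
  return llvm::StringSwitch<ToyDistro>(Codename)
      .Case("bionic", ToyDistro::UbuntuBionic)
      .Case("cosmic", ToyDistro::UbuntuCosmic)
      .Case("disco", ToyDistro::UbuntuDisco) // the case added in this change
      .Default(ToyDistro::Unknown);
}
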
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 952a716cb6e6..a784e218f139 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -26,8 +26,10 @@
#include "ToolChains/HIP.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
+#include "ToolChains/Hurd.h"
#include "ToolChains/Lanai.h"
#include "ToolChains/Linux.h"
+#include "ToolChains/MSP430.h"
#include "ToolChains/MSVC.h"
#include "ToolChains/MinGW.h"
#include "ToolChains/Minix.h"
@@ -37,13 +39,12 @@
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
#include "ToolChains/PS4CPU.h"
-#include "ToolChains/RISCV.h"
+#include "ToolChains/RISCVToolchain.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
@@ -68,18 +69,21 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <memory>
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
+#include <sysexits.h> // EX_IOERR
#endif
using namespace clang::driver;
@@ -88,7 +92,7 @@ using namespace llvm::opt;
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Opts(createDriverOptTable()), Diags(Diags), VFS(std::move(VFS)),
Mode(GCCMode), SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
LTOMode(LTOK_None), ClangExecutable(ClangExecutable),
@@ -98,12 +102,11 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
CCGenDiagnostics(false), TargetTriple(TargetTriple),
CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
- CCCUsePCH(true), GenReproducer(false),
- SuppressMissingInputWarning(false) {
+ GenReproducer(false), SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
- this->VFS = vfs::getRealFileSystem();
+ this->VFS = llvm::vfs::getRealFileSystem();
Name = llvm::sys::path::filename(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
@@ -164,6 +167,7 @@ void Driver::setDriverModeFromOption(StringRef Opt) {
}
InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
+ bool IsClCompatMode,
bool &ContainsError) {
llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
ContainsError = false;
@@ -171,7 +175,7 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks();
+ getIncludeExcludeOptionFlagMasks(IsClCompatMode);
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args =
@@ -300,6 +304,7 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DerivedArgList *DAL = new DerivedArgList(Args);
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
+ bool HasNostdlibxx = Args.hasArg(options::OPT_nostdlibxx);
bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
for (Arg *A : Args) {
// Unfortunately, we have to parse some forwarding options (-Xassembler,
@@ -344,7 +349,8 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
StringRef Value = A->getValue();
// Rewrite unless -nostdlib is present.
- if (!HasNostdlib && !HasNodefaultlib && Value == "stdc++") {
+ if (!HasNostdlib && !HasNodefaultlib && !HasNostdlibxx &&
+ Value == "stdc++") {
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_stdcxx));
continue;
}
@@ -399,6 +405,13 @@ static llvm::Triple computeTargetTriple(const Driver &D,
llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
+ // GNU/Hurd's triples should have been -hurd-gnu*, but were historically made
+ // -gnu* only, and we cannot change this, so we have to detect that case as
+ // being the Hurd OS.
+ if (TargetTriple.find("-unknown-gnu") != StringRef::npos ||
+ TargetTriple.find("-pc-gnu") != StringRef::npos)
+ Target.setOSName("hurd");
+
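
A small illustration of the rewrite above; the triple string is an example, and the check itself only looks at the original spelling before normalization:

#include "llvm/ADT/Triple.h"
#include <cassert>

static void hurdTripleSketch() {
  // Historical GNU/Hurd triples spell the OS component as plain "gnu"...
  llvm::Triple T(llvm::Triple::normalize("i686-unknown-gnu"));
  // ...so the driver substitutes "hurd", and toolchain selection sees Hurd.
  T.setOSName("hurd");
  assert(T.getOSName() == "hurd");
}
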
// Handle Apple-specific options available here.
if (Target.isOSBinFormatMachO()) {
// If an explicit Darwin arch name is given, that trumps all.
@@ -481,6 +494,29 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.setVendorName("intel");
}
+ // If target is MIPS adjust the target triple
+ // accordingly to provided ABI name.
+ A = Args.getLastArg(options::OPT_mabi_EQ);
+ if (A && Target.isMIPS()) {
+ StringRef ABIName = A->getValue();
+ if (ABIName == "32") {
+ Target = Target.get32BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNUABI64 ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (ABIName == "n32") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABI64)
+ Target.setEnvironment(llvm::Triple::GNUABIN32);
+ } else if (ABIName == "64") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNUABI64);
+ }
+ }
+
return Target;
}
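
A compact sketch of one case of the block above (the starting triple is an example): -mabi=64 widens the architecture and moves a plain GNU environment to GNUABI64.

#include "llvm/ADT/Triple.h"
#include <cassert>

static void mipsAbi64Sketch() {
  llvm::Triple T("mips-unknown-linux-gnu");
  T = T.get64BitArchVariant();                 // mips -> mips64
  if (T.getEnvironment() == llvm::Triple::GNU) // as in the ABIName == "64" case
    T.setEnvironment(llvm::Triple::GNUABI64);
  assert(T.getArch() == llvm::Triple::mips64);
  assert(T.getEnvironment() == llvm::Triple::GNUABI64);
}
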
@@ -705,7 +741,7 @@ bool Driver::readConfigFile(StringRef FileName) {
ConfigFile = CfgFileName.str();
bool ContainErrors;
CfgOptions = llvm::make_unique<InputArgList>(
- ParseArgStrings(NewCfgArgs, ContainErrors));
+ ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
if (ContainErrors) {
CfgOptions.reset();
return true;
@@ -899,7 +935,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Arguments specified in command line.
bool ContainsError;
CLOptions = llvm::make_unique<InputArgList>(
- ParseArgStrings(ArgList.slice(1), ContainsError));
+ ParseArgStrings(ArgList.slice(1), IsCLMode(), ContainsError));
// Try parsing configuration file.
if (!ContainsError)
@@ -909,22 +945,48 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// All arguments, from both config file and command line.
InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
: std::move(*CLOptions));
- if (HasConfigFile)
- for (auto *Opt : *CLOptions) {
- if (Opt->getOption().matches(options::OPT_config))
- continue;
+
+ auto appendOneArg = [&Args](const Arg *Opt, const Arg *BaseArg) {
unsigned Index = Args.MakeIndex(Opt->getSpelling());
- const Arg *BaseArg = &Opt->getBaseArg();
- if (BaseArg == Opt)
- BaseArg = nullptr;
Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
Index, BaseArg);
Copy->getValues() = Opt->getValues();
if (Opt->isClaimed())
Copy->claim();
Args.append(Copy);
+ };
+
+ if (HasConfigFile)
+ for (auto *Opt : *CLOptions) {
+ if (Opt->getOption().matches(options::OPT_config))
+ continue;
+ const Arg *BaseArg = &Opt->getBaseArg();
+ if (BaseArg == Opt)
+ BaseArg = nullptr;
+ appendOneArg(Opt, BaseArg);
}
+ // In CL mode, look for any pass-through arguments
+ if (IsCLMode() && !ContainsError) {
+ SmallVector<const char *, 16> CLModePassThroughArgList;
+ for (const auto *A : Args.filtered(options::OPT__SLASH_clang)) {
+ A->claim();
+ CLModePassThroughArgList.push_back(A->getValue());
+ }
+
+ if (!CLModePassThroughArgList.empty()) {
+ // Parse any pass through args using default clang processing rather
+ // than clang-cl processing.
+ auto CLModePassThroughOptions = llvm::make_unique<InputArgList>(
+ ParseArgStrings(CLModePassThroughArgList, false, ContainsError));
+
+ if (!ContainsError)
+ for (auto *Opt : *CLModePassThroughOptions) {
+ appendOneArg(Opt, nullptr);
+ }
+ }
+ }
+
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
@@ -947,8 +1009,6 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue();
- CCCUsePCH =
- Args.hasFlag(options::OPT_ccc_pch_is_pch, options::OPT_ccc_pch_is_pth);
GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
options::OPT_fno_crash_diagnostics,
!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
@@ -1377,8 +1437,9 @@ int Driver::ExecuteCompilation(
// Otherwise, remove result files and print extra information about abnormal
// failures.
+ int Res = 0;
for (const auto &CmdPair : FailingCommands) {
- int Res = CmdPair.first;
+ int CommandRes = CmdPair.first;
const Command *FailingCommand = CmdPair.second;
// Remove result files if we're not saving temps.
@@ -1387,10 +1448,19 @@ int Driver::ExecuteCompilation(
C.CleanupFileMap(C.getResultFiles(), JA, true);
// Failure result files are valid unless we crashed.
- if (Res < 0)
+ if (CommandRes < 0)
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
}
+#if LLVM_ON_UNIX
+ // llvm/lib/Support/Unix/Signals.inc will exit with a special return code
+ // for SIGPIPE. Do not print diagnostics for this case.
+ if (CommandRes == EX_IOERR) {
+ Res = CommandRes;
+ continue;
+ }
+#endif
+
// Print extra information about abnormal failures, if possible.
//
// This is ad-hoc, but we don't want to be excessively noisy. If the result
@@ -1400,30 +1470,31 @@ int Driver::ExecuteCompilation(
// diagnostics, so always print the diagnostic there.
const Tool &FailingTool = FailingCommand->getCreator();
- if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
+ if (!FailingCommand->getCreator().hasGoodDiagnostics() || CommandRes != 1) {
// FIXME: See FIXME above regarding result code interpretation.
- if (Res < 0)
+ if (CommandRes < 0)
Diag(clang::diag::err_drv_command_signalled)
<< FailingTool.getShortName();
else
- Diag(clang::diag::err_drv_command_failed) << FailingTool.getShortName()
- << Res;
+ Diag(clang::diag::err_drv_command_failed)
+ << FailingTool.getShortName() << CommandRes;
}
}
- return 0;
+ return Res;
}
void Driver::PrintHelp(bool ShowHidden) const {
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks();
+ getIncludeExcludeOptionFlagMasks(IsCLMode());
ExcludedFlagsBitmask |= options::NoDriverOption;
if (!ShowHidden)
ExcludedFlagsBitmask |= HelpHidden;
- getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
+ std::string Usage = llvm::formatv("{0} [options] file...", Name).str();
+ getOpts().PrintHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
IncludedFlagsBitmask, ExcludedFlagsBitmask,
/*ShowAllAliases=*/false);
}
@@ -1472,6 +1543,11 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
unsigned short DisableFlags =
options::NoDriverOption | options::Unsupported | options::Ignored;
+ // Distinguish "--autocomplete=-someflag" and "--autocomplete=-someflag,"
+ // because the latter indicates that the user put space before pushing tab
+ // which should end up in a file completion.
+ const bool HasSpace = PassedFlags.endswith(",");
+
// Parse PassedFlags by "," as all the command-line flags are passed to this
// function separated by ","
StringRef TargetFlags = PassedFlags;
@@ -1498,7 +1574,19 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
if (SuggestedCompletions.empty())
SuggestedCompletions = Opts->suggestValueCompletions(Cur, "");
- if (SuggestedCompletions.empty()) {
+ // If Flags were empty, it means the user typed `clang [tab]` where we should
+ // list all possible flags. If there was no value completion and the user
+ // pressed tab after a space, we should fall back to a file completion.
+ // We're printing a newline to be consistent with what we print at the end of
+ // this function.
+ if (SuggestedCompletions.empty() && HasSpace && !Flags.empty()) {
+ llvm::outs() << '\n';
+ return;
+ }
+
+ // When flag ends with '=' and there was no value completion, return empty
+ // string and fall back to the file autocompletion.
+ if (SuggestedCompletions.empty() && !Cur.endswith("=")) {
// If the flag is in the form of "--autocomplete=-foo",
// we were requested to print out all option names that start with "-foo".
// For example, "--autocomplete=-fsyn" is expanded to "-fsyntax-only".
@@ -1516,12 +1604,11 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// deterministic order. We could sort in any way, but we chose
// case-insensitive sorting for consistency with the -help option
// which prints out options in the case-insensitive alphabetical order.
- llvm::sort(SuggestedCompletions.begin(), SuggestedCompletions.end(),
- [](StringRef A, StringRef B) {
- if (int X = A.compare_lower(B))
- return X < 0;
- return A.compare(B) > 0;
- });
+ llvm::sort(SuggestedCompletions, [](StringRef A, StringRef B) {
+ if (int X = A.compare_lower(B))
+ return X < 0;
+ return A.compare(B) > 0;
+ });
llvm::outs() << llvm::join(SuggestedCompletions, "\n") << '\n';
}
@@ -1661,17 +1748,28 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
- for (const Multilib &Multilib : TC.getMultilibs()) {
- if (Multilib.gccSuffix().empty())
- llvm::outs() << ".\n";
- else {
- StringRef Suffix(Multilib.gccSuffix());
- assert(Suffix.front() == '/');
- llvm::outs() << Suffix.substr(1) << "\n";
- }
+ const Multilib &Multilib = TC.getMultilib();
+ if (Multilib.gccSuffix().empty())
+ llvm::outs() << ".\n";
+ else {
+ StringRef Suffix(Multilib.gccSuffix());
+ assert(Suffix.front() == '/');
+ llvm::outs() << Suffix.substr(1) << "\n";
}
return false;
}
+
+ if (C.getArgs().hasArg(options::OPT_print_target_triple)) {
+ llvm::outs() << TC.getTripleString() << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_effective_triple)) {
+ const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(C.getArgs()));
+ llvm::outs() << Triple.getTriple() << "\n";
+ return false;
+ }
+
return true;
}
@@ -1882,7 +1980,7 @@ static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
}
}
- if (llvm::sys::fs::exists(Twine(Path)))
+ if (D.getVFS().exists(Path))
return true;
if (D.IsCLMode()) {
@@ -1960,7 +2058,8 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Ty = types::TY_C;
} else {
// Otherwise lookup by extension.
- // Fallback is C if invoked as C preprocessor or Object otherwise.
+ // Fallback is C if invoked as C preprocessor, C++ if invoked with
+ // clang-cl /E, or Object otherwise.
// We use a host hook here because Darwin at least has its own
// idea of what .s is.
if (const char *Ext = strrchr(Value, '.'))
@@ -1969,6 +2068,8 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
if (Ty == types::TY_INVALID) {
if (CCCIsCPP())
Ty = types::TY_C;
+ else if (IsCLMode() && Args.hasArgNoClaim(options::OPT_E))
+ Ty = types::TY_CXX;
else
Ty = types::TY_Object;
}
@@ -2223,6 +2324,18 @@ class OffloadingActionBuilder final {
// If this is an unbundling action use it as is for each CUDA toolchain.
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
CudaDeviceActions.clear();
+ auto *IA = cast<InputAction>(UA->getInputs().back());
+ std::string FileName = IA->getInputArg().getAsString(Args);
+ // Check if the type of the file is the same as the action. Do not
+ // unbundle it if it is not. Do not unbundle .so files, for example,
+ // which are not object files.
+ if (IA->getType() == types::TY_Object &&
+ (!llvm::sys::path::has_extension(FileName) ||
+ types::lookupTypeForExtension(
+ llvm::sys::path::extension(FileName).drop_front()) !=
+ types::TY_Object))
+ return ABRT_Inactive;
+
for (auto Arch : GpuArchList) {
CudaDeviceActions.push_back(UA);
UA->registerDependentActionInfo(ToolChains[0], CudaArchToString(Arch),
@@ -2466,11 +2579,13 @@ class OffloadingActionBuilder final {
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
+ bool Relocatable;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
- : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {}
+ : CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP),
+ Relocatable(false) {}
bool canUseBundlerUnbundler() const override { return true; }
@@ -2479,23 +2594,70 @@ class OffloadingActionBuilder final {
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
// amdgcn does not support linking of object files, therefore we skip
- // backend and assemble phases to output LLVM IR.
- if (CudaDeviceActions.empty() || CurPhase == phases::Backend ||
+ // backend and assemble phases to output LLVM IR. Except for generating
+ // non-relocatable device code, where we generate a fat binary for device
+ // code and pass it to the host in the Backend phase.
+ if (CudaDeviceActions.empty() ||
+ (CurPhase == phases::Backend && Relocatable) ||
CurPhase == phases::Assemble)
return ABRT_Success;
- assert((CurPhase == phases::Link ||
+ assert(((CurPhase == phases::Link && Relocatable) ||
CudaDeviceActions.size() == GpuArchList.size()) &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
"Not expecting CUDA actions in host-only compilation.");
- // Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
- // This happens to each device action originated from each input file.
- // Later on, device actions in DeviceLinkerInputs are used to create
- // device link actions in appendLinkDependences and the created device
- // link actions are passed to the offload action as device dependence.
- if (CurPhase == phases::Link) {
+ if (!Relocatable && CurPhase == phases::Backend) {
+ // If we are in backend phase, we attempt to generate the fat binary.
+ // We compile each arch to IR and use a link action to generate code
+ // object containing ISA. Then we use a special "link" action to create
+ // a fat binary containing all the code objects for different GPU's.
+ // The fat binary is then an input to the host action.
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ // Create a link action to link device IR with device library
+ // and generate ISA.
+ ActionList AL;
+ AL.push_back(CudaDeviceActions[I]);
+ CudaDeviceActions[I] =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Image);
+
+ // OffloadingActionBuilder propagates the device arch until it reaches an
+ // offload action. The next action, which creates the fat binary, has no
+ // device arch, while the link action above and its input do, so an offload
+ // action is inserted here to keep the next action's null device arch from
+ // being propagated to the link action above.
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*CudaDeviceActions[I], *ToolChains.front(),
+ CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
+ CudaDeviceActions[I] = C.MakeAction<OffloadAction>(
+ DDep, CudaDeviceActions[I]->getType());
+ }
+ // Create HIP fat binary with a special "link" action.
+ CudaFatBinary =
+ C.MakeAction<LinkJobAction>(CudaDeviceActions,
+ types::TY_HIP_FATBIN);
+
+ if (!CompileDeviceOnly) {
+ DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
+ AssociatedOffloadKind);
+ // Clear the fat binary; it is already a dependence of a host
+ // action.
+ CudaFatBinary = nullptr;
+ }
+
+ // Remove the CUDA actions as they are already connected to a host
+ // action or fat binary.
+ CudaDeviceActions.clear();
+
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
+ } else if (CurPhase == phases::Link) {
+ // Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
+ // This happens to each device action originated from each input file.
+ // Later on, device actions in DeviceLinkerInputs are used to create
+ // device link actions in appendLinkDependences and the created device
+ // link actions are passed to the offload action as device dependence.
DeviceLinkerInputs.resize(CudaDeviceActions.size());
auto LI = DeviceLinkerInputs.begin();
for (auto *A : CudaDeviceActions) {
@@ -2528,6 +2690,13 @@ class OffloadingActionBuilder final {
++I;
}
}
+
+ bool initialize() override {
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
+
+ return CudaActionBuilderBase::initialize();
+ }
};
/// OpenMP action builder. The host bitcode is passed to the device frontend
@@ -2548,6 +2717,8 @@ class OffloadingActionBuilder final {
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
+ if (OpenMPDeviceActions.empty())
+ return ABRT_Inactive;
// We should always have an action for each input.
assert(OpenMPDeviceActions.size() == ToolChains.size() &&
@@ -2591,6 +2762,17 @@ class OffloadingActionBuilder final {
// If this is an unbundling action use it as is for each OpenMP toolchain.
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
OpenMPDeviceActions.clear();
+ auto *IA = cast<InputAction>(UA->getInputs().back());
+ std::string FileName = IA->getInputArg().getAsString(Args);
+ // Check if the type of the file is the same as the action. Do not
+ // unbundle it if it is not. Do not unbundle .so files, for example,
+ // which are not object files.
+ if (IA->getType() == types::TY_Object &&
+ (!llvm::sys::path::has_extension(FileName) ||
+ types::lookupTypeForExtension(
+ llvm::sys::path::extension(FileName).drop_front()) !=
+ types::TY_Object))
+ return ABRT_Inactive;
for (unsigned I = 0; I < ToolChains.size(); ++I) {
OpenMPDeviceActions.push_back(UA);
UA->registerDependentActionInfo(
@@ -2835,6 +3017,11 @@ public:
OffloadKind |= SB->getAssociatedOffloadKind();
}
+ // Do not use unbundler if the Host does not depend on device action.
+ if (OffloadKind == Action::OFK_None && CanUseBundler)
+ if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction))
+ HostAction = UA->getInputs().back();
+
return false;
}
@@ -2852,8 +3039,10 @@ public:
}
// If we can use the bundler, replace the host action by the bundling one in
- // the resulting list. Otherwise, just append the device actions.
- if (CanUseBundler && !OffloadAL.empty()) {
+ // the resulting list. Otherwise, just append the device actions. For
+ // device-only compilation, HostAction is a null pointer, so only do this
+ // when HostAction is not null.
+ if (CanUseBundler && HostAction && !OffloadAL.empty()) {
// Add the host action to the list in order to create the bundling action.
OffloadAL.push_back(HostAction);
@@ -2971,22 +3160,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
}
- // Diagnose unsupported forms of /Yc /Yu. Ignore /Yc/Yu for now if:
- // * no filename after it
- // * both /Yc and /Yu passed but with different filenames
- // * corresponding file not also passed as /FI
+ // Ignore /Yc/Yu if both /Yc and /Yu passed but with different filenames.
Arg *YcArg = Args.getLastArg(options::OPT__SLASH_Yc);
Arg *YuArg = Args.getLastArg(options::OPT__SLASH_Yu);
- if (YcArg && YcArg->getValue()[0] == '\0') {
- Diag(clang::diag::warn_drv_ycyu_no_arg_clang_cl) << YcArg->getSpelling();
- Args.eraseArg(options::OPT__SLASH_Yc);
- YcArg = nullptr;
- }
- if (YuArg && YuArg->getValue()[0] == '\0') {
- Diag(clang::diag::warn_drv_ycyu_no_arg_clang_cl) << YuArg->getSpelling();
- Args.eraseArg(options::OPT__SLASH_Yu);
- YuArg = nullptr;
- }
if (YcArg && YuArg && strcmp(YcArg->getValue(), YuArg->getValue()) != 0) {
Diag(clang::diag::warn_drv_ycyu_different_arg_clang_cl);
Args.eraseArg(options::OPT__SLASH_Yc);
@@ -2998,9 +3174,10 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.eraseArg(options::OPT__SLASH_Yc);
YcArg = nullptr;
}
- if (Args.hasArg(options::OPT__SLASH_Y_)) {
- // /Y- disables all pch handling. Rather than check for it everywhere,
- // just remove clang-cl pch-related flags here.
+ if (FinalPhase == phases::Preprocess || Args.hasArg(options::OPT__SLASH_Y_)) {
+ // If only preprocessing or /Y- is used, all pch handling is disabled.
+ // Rather than check for it everywhere, just remove clang-cl pch-related
+ // flags here.
Args.eraseArg(options::OPT__SLASH_Fp);
Args.eraseArg(options::OPT__SLASH_Yc);
Args.eraseArg(options::OPT__SLASH_Yu);
@@ -3011,6 +3188,7 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
// Construct the actions to perform.
+ HeaderModulePrecompileJobAction *HeaderModuleAction = nullptr;
ActionList LinkerInputs;
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
@@ -3107,13 +3285,29 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
+ // Each precompiled header file after a module file action is a module
+ // header of that same module file, rather than being compiled to a
+ // separate PCH.
+ if (Phase == phases::Precompile && HeaderModuleAction &&
+ getPrecompiledType(InputType) == types::TY_PCH) {
+ HeaderModuleAction->addModuleHeaderInput(Current);
+ Current = nullptr;
+ break;
+ }
+
+ // FIXME: Should we include any prior module file outputs as inputs of
+ // later actions in the same command line?
+
// Otherwise construct the appropriate action.
- auto *NewCurrent = ConstructPhaseAction(C, Args, Phase, Current);
+ Action *NewCurrent = ConstructPhaseAction(C, Args, Phase, Current);
// We didn't create a new action, so we will just move to the next phase.
if (NewCurrent == Current)
continue;
+ if (auto *HMA = dyn_cast<HeaderModulePrecompileJobAction>(NewCurrent))
+ HeaderModuleAction = HMA;
+
Current = NewCurrent;
// Use the current host action in any of the offloading actions, if
@@ -3193,10 +3387,25 @@ Action *Driver::ConstructPhaseAction(
types::ID OutputTy = getPrecompiledType(Input->getType());
assert(OutputTy != types::TY_INVALID &&
"Cannot precompile this input type!");
+
+ // If we're given a module name, precompile header file inputs as a
+ // module, not as a precompiled header.
+ const char *ModName = nullptr;
+ if (OutputTy == types::TY_PCH) {
+ if (Arg *A = Args.getLastArg(options::OPT_fmodule_name_EQ))
+ ModName = A->getValue();
+ if (ModName)
+ OutputTy = types::TY_ModuleFile;
+ }
+
if (Args.hasArg(options::OPT_fsyntax_only)) {
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
}
+
+ if (ModName)
+ return C.MakeAction<HeaderModulePrecompileJobAction>(Input, OutputTy,
+ ModName);
return C.MakeAction<PrecompileJobAction>(Input, OutputTy);
}
case phases::Compile: {
@@ -3449,7 +3658,7 @@ class ToolSelector final {
/// - Backend + Compile.
const Tool *
combineAssembleBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
- const ActionList *&Inputs,
+ ActionList &Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 3 || !canCollapseAssembleAction())
return nullptr;
@@ -3475,13 +3684,13 @@ class ToolSelector final {
if (!T->hasIntegratedAssembler())
return nullptr;
- Inputs = &CJ->getInputs();
+ Inputs = CJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/3);
return T;
}
const Tool *combineAssembleBackend(ArrayRef<JobActionInfo> ActionInfo,
- const ActionList *&Inputs,
+ ActionList &Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 2 || !canCollapseAssembleAction())
return nullptr;
@@ -3508,13 +3717,13 @@ class ToolSelector final {
if (!T->hasIntegratedAssembler())
return nullptr;
- Inputs = &BJ->getInputs();
+ Inputs = BJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/2);
return T;
}
const Tool *combineBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
- const ActionList *&Inputs,
+ ActionList &Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 2)
return nullptr;
@@ -3546,7 +3755,7 @@ class ToolSelector final {
if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
return nullptr;
- Inputs = &CJ->getInputs();
+ Inputs = CJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/2);
return T;
@@ -3556,22 +3765,28 @@ class ToolSelector final {
/// preprocessor action, and the current input is indeed a preprocessor
/// action. If combining results in the collapse of offloading actions, those
/// are appended to \a CollapsedOffloadAction.
- void combineWithPreprocessor(const Tool *T, const ActionList *&Inputs,
+ void combineWithPreprocessor(const Tool *T, ActionList &Inputs,
ActionList &CollapsedOffloadAction) {
if (!T || !canCollapsePreprocessorAction() || !T->hasIntegratedCPP())
return;
// Attempt to get a preprocessor action dependence.
ActionList PreprocessJobOffloadActions;
- auto *PJ = getPrevDependentAction(*Inputs, PreprocessJobOffloadActions);
- if (!PJ || !isa<PreprocessJobAction>(PJ))
- return;
+ ActionList NewInputs;
+ for (Action *A : Inputs) {
+ auto *PJ = getPrevDependentAction({A}, PreprocessJobOffloadActions);
+ if (!PJ || !isa<PreprocessJobAction>(PJ)) {
+ NewInputs.push_back(A);
+ continue;
+ }
- // This is legal to combine. Append any offload action we found and set the
- // current inputs to preprocessor inputs.
- CollapsedOffloadAction.append(PreprocessJobOffloadActions.begin(),
- PreprocessJobOffloadActions.end());
- Inputs = &PJ->getInputs();
+ // This is legal to combine. Append any offload action we found and add the
+ // current input to preprocessor inputs.
+ CollapsedOffloadAction.append(PreprocessJobOffloadActions.begin(),
+ PreprocessJobOffloadActions.end());
+ NewInputs.append(PJ->input_begin(), PJ->input_end());
+ }
+ Inputs = NewInputs;
}
public:
@@ -3589,7 +3804,7 @@ public:
/// connected to collapsed actions are updated accordingly. The latter enables
/// the caller of the selector to process them afterwards instead of just
/// dropping them. If no suitable tool is found, null will be returned.
- const Tool *getTool(const ActionList *&Inputs,
+ const Tool *getTool(ActionList &Inputs,
ActionList &CollapsedOffloadAction) {
//
// Get the largest chain of actions that we could combine.
@@ -3625,7 +3840,7 @@ public:
if (!T)
T = combineBackendCompile(ActionChain, Inputs, CollapsedOffloadAction);
if (!T) {
- Inputs = &BaseAction->getInputs();
+ Inputs = BaseAction->getInputs();
T = TC.SelectTool(*BaseAction);
}
@@ -3770,7 +3985,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
}
- const ActionList *Inputs = &A->getInputs();
+ ActionList Inputs = A->getInputs();
const JobAction *JA = cast<JobAction>(A);
ActionList CollapsedOffloadActions;
@@ -3796,7 +4011,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
// Only use pipes when there is exactly one input.
InputInfoList InputInfos;
- for (const Action *Input : *Inputs) {
+ for (const Action *Input : Inputs) {
// Treat dsymutil and verify sub-jobs as being at the top-level too, they
// shouldn't get temporary output names.
// FIXME: Clean this up.
@@ -3815,6 +4030,10 @@ InputInfo Driver::BuildJobsForActionNoCache(
if (JA->getType() == types::TY_dSYM)
BaseInput = InputInfos[0].getFilename();
+ // ... and in header module compilations, which use the module name.
+ if (auto *ModuleJA = dyn_cast<HeaderModulePrecompileJobAction>(JA))
+ BaseInput = ModuleJA->getModuleName();
+
// Append outputs of offload device jobs to the input list
if (!OffloadDependencesInputInfo.empty())
InputInfos.append(OffloadDependencesInputInfo.begin(),
@@ -4153,16 +4372,24 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
- // Respect a limited subset of the '-Bprefix' functionality in GCC by
- // attempting to use this prefix when looking for file paths.
- for (const std::string &Dir : PrefixDirs) {
- if (Dir.empty())
- continue;
- SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
- llvm::sys::path::append(P, Name);
- if (llvm::sys::fs::exists(Twine(P)))
- return P.str();
- }
+ // Search for Name in a list of paths.
+ auto SearchPaths = [&](const llvm::SmallVectorImpl<std::string> &P)
+ -> llvm::Optional<std::string> {
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking for file paths.
+ for (const auto &Dir : P) {
+ if (Dir.empty())
+ continue;
+ SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
+ llvm::sys::path::append(P, Name);
+ if (llvm::sys::fs::exists(Twine(P)))
+ return P.str().str();
+ }
+ return None;
+ };
+
+ if (auto P = SearchPaths(PrefixDirs))
+ return *P;
SmallString<128> R(ResourceDir);
llvm::sys::path::append(R, Name);
@@ -4174,14 +4401,11 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (llvm::sys::fs::exists(Twine(P)))
return P.str();
- for (const std::string &Dir : TC.getFilePaths()) {
- if (Dir.empty())
- continue;
- SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
- llvm::sys::path::append(P, Name);
- if (llvm::sys::fs::exists(Twine(P)))
- return P.str();
- }
+ if (auto P = SearchPaths(TC.getLibraryPaths()))
+ return *P;
+
+ if (auto P = SearchPaths(TC.getFilePaths()))
+ return *P;
return Name;
}
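The duplicated -B/file-path loops above are now folded into a single SearchPaths lambda, with the toolchain's new library paths probed before its regular file paths. A minimal standalone sketch of the same lookup pattern, for illustration only (plain C++17 and the standard library rather than the LLVM support APIs the driver actually uses):

#include <filesystem>
#include <optional>
#include <string>
#include <vector>

// Probe each directory for Name; a leading '=' marks the directory as
// sysroot-relative, mirroring GCC's -Bprefix convention.
static std::optional<std::string>
searchPaths(const std::vector<std::string> &Dirs, const std::string &SysRoot,
            const std::string &Name) {
  for (const std::string &Dir : Dirs) {
    if (Dir.empty())
      continue;
    std::filesystem::path P = Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir;
    P /= Name;
    if (std::filesystem::exists(P))
      return P.string();
  }
  return std::nullopt; // caller falls through to the next search list
}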
@@ -4255,6 +4479,17 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
return Path.str();
}
+std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
+ SmallString<128> Path;
+ std::error_code EC = llvm::sys::fs::createUniqueDirectory(Prefix, Path);
+ if (EC) {
+ Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return "";
+ }
+
+ return Path.str();
+}
+
std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
SmallString<128> Output;
if (Arg *FpArg = C.getArgs().getLastArg(options::OPT__SLASH_Fp)) {
@@ -4267,11 +4502,11 @@ std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
// extension of .pch is assumed. "
if (!llvm::sys::path::has_extension(Output))
Output += ".pch";
- } else if (Arg *YcArg = C.getArgs().getLastArg(options::OPT__SLASH_Yc)) {
- Output = YcArg->getValue();
- llvm::sys::path::replace_extension(Output, ".pch");
} else {
- Output = BaseName;
+ if (Arg *YcArg = C.getArgs().getLastArg(options::OPT__SLASH_Yc))
+ Output = YcArg->getValue();
+ if (Output.empty())
+ Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
}
return Output.str();
@@ -4373,6 +4608,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Contiki:
TC = llvm::make_unique<toolchains::Contiki>(*this, Target, Args);
break;
+ case llvm::Triple::Hurd:
+ TC = llvm::make_unique<toolchains::Hurd>(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -4400,6 +4638,10 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::avr:
TC = llvm::make_unique<toolchains::AVRToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::msp430:
+ TC =
+ llvm::make_unique<toolchains::MSP430ToolChain>(*this, Target, Args);
+ break;
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
TC = llvm::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
@@ -4508,11 +4750,11 @@ bool Driver::GetReleaseVersion(StringRef Str,
return false;
}
-std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks() const {
+std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
unsigned IncludedFlagsBitmask = 0;
unsigned ExcludedFlagsBitmask = options::NoDriverOption;
- if (Mode == CLMode) {
+ if (IsClCompatMode) {
// Include CL and Core options.
IncludedFlagsBitmask |= options::CLOption;
IncludedFlagsBitmask |= options::CoreOption;
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index bd1a9bd8e3eb..8d1dfbe12d73 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -35,7 +35,8 @@ using namespace clang;
using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
- const char *Executable, const ArgStringList &Arguments,
+ const char *Executable,
+ const llvm::opt::ArgStringList &Arguments,
ArrayRef<InputInfo> Inputs)
: Source(Source), Creator(Creator), Executable(Executable),
Arguments(Arguments) {
@@ -315,6 +316,12 @@ void Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
+ if (PrintInputFilenames) {
+ for (const char *Arg : InputFilenames)
+ llvm::outs() << llvm::sys::path::filename(Arg) << "\n";
+ llvm::outs().flush();
+ }
+
SmallVector<const char*, 128> Argv;
Optional<ArrayRef<StringRef>> Env;
@@ -366,7 +373,7 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
const char *Executable_,
- const ArgStringList &Arguments_,
+ const llvm::opt::ArgStringList &Arguments_,
ArrayRef<InputInfo> Inputs,
std::unique_ptr<Command> Fallback_)
: Command(Source_, Creator_, Executable_, Arguments_, Inputs),
@@ -405,11 +412,9 @@ int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
return SecondaryStatus;
}
-ForceSuccessCommand::ForceSuccessCommand(const Action &Source_,
- const Tool &Creator_,
- const char *Executable_,
- const ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs)
+ForceSuccessCommand::ForceSuccessCommand(
+ const Action &Source_, const Tool &Creator_, const char *Executable_,
+ const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs)
: Command(Source_, Creator_, Executable_, Arguments_, Inputs) {}
void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index 5d3e31567ce6..1a46073aaa37 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -34,8 +34,9 @@ enum : SanitizerMask {
RequiresPIE = DataFlow | HWAddress | Scudo,
NeedsUnwindTables = Address | HWAddress | Thread | Memory | DataFlow,
SupportsCoverage = Address | HWAddress | KernelAddress | KernelHWAddress |
- Memory | Leak | Undefined | Integer | ImplicitConversion |
- Nullability | DataFlow | Fuzzer | FuzzerNoLink,
+ Memory | KernelMemory | Leak | Undefined | Integer |
+ ImplicitConversion | Nullability | DataFlow | Fuzzer |
+ FuzzerNoLink,
RecoverableByDefault = Undefined | Integer | ImplicitConversion | Nullability,
Unrecoverable = Unreachable | Return,
AlwaysRecoverable = KernelAddress | KernelHWAddress,
@@ -46,7 +47,7 @@ enum : SanitizerMask {
TrappingDefault = CFI,
CFIClasses =
CFIVCall | CFINVCall | CFIMFCall | CFIDerivedCast | CFIUnrelatedCast,
- CompatibleWithMinimalRuntime = TrappingSupported | Scudo,
+ CompatibleWithMinimalRuntime = TrappingSupported | Scudo | ShadowCallStack,
};
enum CoverageFeature {
@@ -206,6 +207,8 @@ bool SanitizerArgs::needsUnwindTables() const {
return Sanitizers.Mask & NeedsUnwindTables;
}
+bool SanitizerArgs::needsLTO() const { return Sanitizers.Mask & NeedsLTO; }
+
SanitizerArgs::SanitizerArgs(const ToolChain &TC,
const llvm::opt::ArgList &Args) {
SanitizerMask AllRemove = 0; // During the loop below, the accumulated set of
@@ -375,13 +378,12 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
KernelAddress | Efficiency),
std::make_pair(SafeStack, Address | HWAddress | Leak | Thread | Memory |
KernelAddress | Efficiency),
- std::make_pair(ShadowCallStack, Address | HWAddress | Leak | Thread |
- Memory | KernelAddress | Efficiency |
- SafeStack),
std::make_pair(KernelHWAddress, Address | HWAddress | Leak | Thread |
Memory | KernelAddress | Efficiency |
- SafeStack | ShadowCallStack)};
-
+ SafeStack),
+ std::make_pair(KernelMemory, Address | HWAddress | Leak | Thread |
+ Memory | KernelAddress | Efficiency |
+ Scudo | SafeStack)};
// Enable toolchain specific default sanitizers if not explicitly disabled.
SanitizerMask Default = TC.getDefaultSanitizers() & ~AllRemove;
@@ -721,16 +723,38 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fsanitize_address_use_after_scope,
options::OPT_fno_sanitize_address_use_after_scope, AsanUseAfterScope);
+ AsanPoisonCustomArrayCookie = Args.hasFlag(
+ options::OPT_fsanitize_address_poison_custom_array_cookie,
+ options::OPT_fno_sanitize_address_poison_custom_array_cookie,
+ AsanPoisonCustomArrayCookie);
+
// As a workaround for a bug in gold 2.26 and earlier, dead stripping of
// globals in ASan is disabled by default on ELF targets.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
AsanGlobalsDeadStripping =
!TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
Args.hasArg(options::OPT_fsanitize_address_globals_dead_stripping);
+
+ AsanUseOdrIndicator =
+ Args.hasFlag(options::OPT_fsanitize_address_use_odr_indicator,
+ options::OPT_fno_sanitize_address_use_odr_indicator,
+ AsanUseOdrIndicator);
} else {
AsanUseAfterScope = false;
}
+ if (AllAddedKinds & HWAddress) {
+ if (Arg *HwasanAbiArg =
+ Args.getLastArg(options::OPT_fsanitize_hwaddress_abi_EQ)) {
+ HwasanAbi = HwasanAbiArg->getValue();
+ if (HwasanAbi != "platform" && HwasanAbi != "interceptor")
+ D.Diag(clang::diag::err_drv_invalid_value)
+ << HwasanAbiArg->getAsString(Args) << HwasanAbi;
+ } else {
+ HwasanAbi = "interceptor";
+ }
+ }
+
if (AllAddedKinds & SafeStack) {
// SafeStack runtime is built into the system on Fuchsia.
SafeStackRuntime = !TC.getTriple().isOSFuchsia();
@@ -894,9 +918,20 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (AsanUseAfterScope)
CmdArgs.push_back("-fsanitize-address-use-after-scope");
+ if (AsanPoisonCustomArrayCookie)
+ CmdArgs.push_back("-fsanitize-address-poison-custom-array-cookie");
+
if (AsanGlobalsDeadStripping)
CmdArgs.push_back("-fsanitize-address-globals-dead-stripping");
+ if (AsanUseOdrIndicator)
+ CmdArgs.push_back("-fsanitize-address-use-odr-indicator");
+
+ if (!HwasanAbi.empty()) {
+ CmdArgs.push_back("-default-function-attr");
+ CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
+ }
+
// MSan: Workaround for PR16386.
// ASan: This is mainly to help LSan with cases such as
// https://github.com/google/sanitizers/issues/373
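As an illustration of the new HWASan ABI plumbing above: with -fsanitize=hwaddress and no -fsanitize-hwaddress-abi= argument, HwasanAbi defaults to "interceptor" and the frontend invocation gains

    -default-function-attr hwasan-abi=interceptor

while -fsanitize-hwaddress-abi=platform yields hwasan-abi=platform; any other value is rejected through err_drv_invalid_value.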
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index cf3db34688df..88a627eab6de 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -13,7 +13,6 @@
#include "ToolChains/Clang.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Driver.h"
@@ -39,6 +38,7 @@
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <cassert>
#include <cstddef>
#include <cstring>
@@ -74,10 +74,17 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
- SmallString<128> P(D.ResourceDir);
+ SmallString<128> P;
+
+ P.assign(D.ResourceDir);
llvm::sys::path::append(P, D.getTargetTriple(), "lib");
if (getVFS().exists(P))
- getFilePaths().push_back(P.str());
+ getLibraryPaths().push_back(P.str());
+
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, Triple.str(), "lib");
+ if (getVFS().exists(P))
+ getLibraryPaths().push_back(P.str());
std::string CandidateLibPath = getArchSpecificLibPath();
if (getVFS().exists(CandidateLibPath))
@@ -92,7 +99,9 @@ void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) {
ToolChain::~ToolChain() = default;
-vfs::FileSystem &ToolChain::getVFS() const { return getDriver().getVFS(); }
+llvm::vfs::FileSystem &ToolChain::getVFS() const {
+ return getDriver().getVFS();
+}
bool ToolChain::useIntegratedAs() const {
return Args.hasFlag(options::OPT_fintegrated_as,
@@ -295,6 +304,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::CompileJobClass:
case Action::PrecompileJobClass:
+ case Action::HeaderModulePrecompileJobClass:
case Action::PreprocessJobClass:
case Action::AnalyzeJobClass:
case Action::MigrateJobClass:
@@ -359,15 +369,16 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
const char *Prefix = IsITANMSVCWindows ? "" : "lib";
- const char *Suffix = Shared ? (Triple.isOSWindows() ? ".dll" : ".so")
+ const char *Suffix = Shared ? (Triple.isOSWindows() ? ".lib" : ".so")
: (IsITANMSVCWindows ? ".lib" : ".a");
+ if (Shared && Triple.isWindowsGNUEnvironment())
+ Suffix = ".dll.a";
- const Driver &D = getDriver();
- SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, D.getTargetTriple(), "lib");
- if (getVFS().exists(P)) {
+ for (const auto &LibPath : getLibraryPaths()) {
+ SmallString<128> P(LibPath);
llvm::sys::path::append(P, Prefix + Twine("clang_rt.") + Component + Suffix);
- return P.str();
+ if (getVFS().exists(P))
+ return P.str();
}
StringRef Arch = getArchNameForCompilerRTLib(*this, Args);
@@ -392,19 +403,23 @@ std::string ToolChain::getArchSpecificLibPath() const {
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
- if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
- false) ||
+ if (needsGCovInstrumentation(Args) ||
Args.hasArg(options::OPT_fprofile_generate) ||
Args.hasArg(options::OPT_fprofile_generate_EQ) ||
Args.hasArg(options::OPT_fprofile_instr_generate) ||
Args.hasArg(options::OPT_fprofile_instr_generate_EQ) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage))
+ Args.hasArg(options::OPT_fcreate_profile))
return true;
return false;
}
+bool ToolChain::needsGCovInstrumentation(const llvm::opt::ArgList &Args) {
+ return Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasArg(options::OPT_coverage);
+}
+
Tool *ToolChain::SelectTool(const JobAction &JA) const {
if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
Action::ActionClass AC = JA.getKind();
@@ -589,7 +604,7 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
// Check to see if an explicit choice to use thumb has been made via
// -mthumb. For assembler files we must check for -mthumb in the options
- // passed to the assember via -Wa or -Xassembler.
+ // passed to the assembler via -Wa or -Xassembler.
bool IsThumb = false;
if (InputType != types::TY_PP_Asm)
IsThumb = Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb,
@@ -762,6 +777,10 @@ void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
void ToolChain::AddFilePathLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ for (const auto &LibPath : getLibraryPaths())
+ if(LibPath.length() > 0)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+
for (const auto &LibPath : getFilePaths())
if(LibPath.length() > 0)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
diff --git a/lib/Driver/ToolChains/AMDGPU.cpp b/lib/Driver/ToolChains/AMDGPU.cpp
index 6b673feeadfc..a421a09891cd 100644
--- a/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/lib/Driver/ToolChains/AMDGPU.cpp
@@ -98,3 +98,16 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
return DAL;
}
+
+void AMDGPUToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ // Default to "hidden" visibility, as object level linking will not be
+ // supported for the foreseeable future.
+ if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat)) {
+ CC1Args.push_back("-fvisibility");
+ CC1Args.push_back("hidden");
+ }
+}
diff --git a/lib/Driver/ToolChains/AMDGPU.h b/lib/Driver/ToolChains/AMDGPU.h
index 36114d0dabc4..9d38eeedf59d 100644
--- a/lib/Driver/ToolChains/AMDGPU.h
+++ b/lib/Driver/ToolChains/AMDGPU.h
@@ -61,6 +61,10 @@ public:
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
+
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
};
} // end namespace toolchains
diff --git a/lib/Driver/ToolChains/Arch/AArch64.cpp b/lib/Driver/ToolChains/Arch/AArch64.cpp
index 5114279b4b45..71e55fe79e27 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -19,10 +19,17 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+/// \returns true if the given triple can determine the default CPU type even
+/// if -arch is not specified.
+static bool isCPUDeterminedByTriple(const llvm::Triple &Triple) {
+ return Triple.isOSDarwin();
+}
+
/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are
/// targeting. Set \p A to the Arg corresponding to the -mcpu argument if it is
/// provided, or to nullptr otherwise.
-std::string aarch64::getAArch64TargetCPU(const ArgList &Args, Arg *&A) {
+std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
+ const llvm::Triple &Triple, Arg *&A) {
std::string CPU;
// If we have -mcpu, use that.
if ((A = Args.getLastArg(options::OPT_mcpu_EQ))) {
@@ -36,9 +43,9 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args, Arg *&A) {
else if (CPU.size())
return CPU;
- // Make sure we pick "cyclone" if -arch is used.
- // FIXME: Should this be picked by checking the target triple instead?
- if (Args.getLastArg(options::OPT_arch))
+ // Make sure we pick "cyclone" if -arch is used or when targeting a Darwin
+ // OS.
+ if (Args.getLastArg(options::OPT_arch) || Triple.isOSDarwin())
return "cyclone";
return "generic";
@@ -152,7 +159,9 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
return getAArch64MicroArchFeaturesFromMtune(D, CPU, Args, Features);
}
-void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
+void aarch64::getAArch64TargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features) {
Arg *A;
bool success = true;
@@ -162,9 +171,9 @@ void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features);
else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
- else if (Args.hasArg(options::OPT_arch))
- success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args, A),
- Args, Features);
+ else if (Args.hasArg(options::OPT_arch) || isCPUDeterminedByTriple(Triple))
+ success = getAArch64ArchFeaturesFromMcpu(
+ D, getAArch64TargetCPU(Args, Triple, A), Args, Features);
if (success && (A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)))
success =
@@ -172,9 +181,10 @@ void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ)))
success =
getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
- else if (success && Args.hasArg(options::OPT_arch))
+ else if (success &&
+ (Args.hasArg(options::OPT_arch) || isCPUDeterminedByTriple(Triple)))
success = getAArch64MicroArchFeaturesFromMcpu(
- D, getAArch64TargetCPU(Args, A), Args, Features);
+ D, getAArch64TargetCPU(Args, Triple, A), Args, Features);
if (!success)
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
@@ -193,17 +203,172 @@ void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back("-crc");
}
+ // Handle (arch-dependent) fp16fml/fullfp16 relationship.
+ // FIXME: this fp16fml option handling will be reimplemented after the
+ // TargetParser rewrite.
+ const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
+ const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
+ if (std::find(Features.begin(), Features.end(), "+v8.4a") != Features.end()) {
+ const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
+ if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
+ // The only entangled feature that can appear to the right of this +fullfp16 is -fp16fml.
+ // Only append the +fp16fml if there is no -fp16fml after the +fullfp16.
+ if (std::find(Features.rbegin(), ItRFullFP16, "-fp16fml") == ItRFullFP16)
+ Features.push_back("+fp16fml");
+ }
+ else
+ goto fp16_fml_fallthrough;
+ }
+ else {
+fp16_fml_fallthrough:
+ // In both of these cases, putting the 'other' feature on the end of the vector will
+ // result in the same effect as placing it immediately after the current feature.
+ if (ItRNoFullFP16 < ItRFP16FML)
+ Features.push_back("-fp16fml");
+ else if (ItRNoFullFP16 > ItRFP16FML)
+ Features.push_back("+fullfp16");
+ }
+
+ // FIXME: this needs reimplementation too after the TargetParser rewrite
+ //
+ // Context sensitive meaning of Crypto:
+ // 1) For Arch >= ARMv8.4a: crypto = sm4 + sha3 + sha2 + aes
+ // 2) For Arch <= ARMv8.3a: crypto = sha2 + aes
+ const auto ItBegin = Features.begin();
+ const auto ItEnd = Features.end();
+ const auto ItRBegin = Features.rbegin();
+ const auto ItREnd = Features.rend();
+ const auto ItRCrypto = std::find(ItRBegin, ItREnd, "+crypto");
+ const auto ItRNoCrypto = std::find(ItRBegin, ItREnd, "-crypto");
+ const auto HasCrypto = ItRCrypto != ItREnd;
+ const auto HasNoCrypto = ItRNoCrypto != ItREnd;
+ const ptrdiff_t PosCrypto = ItRCrypto - ItRBegin;
+ const ptrdiff_t PosNoCrypto = ItRNoCrypto - ItRBegin;
+
+ bool NoCrypto = false;
+ if (HasCrypto && HasNoCrypto) {
+ if (PosNoCrypto < PosCrypto)
+ NoCrypto = true;
+ }
+
+ if (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd) {
+ if (HasCrypto && !NoCrypto) {
+ // Check if we have NOT disabled an algorithm with something like:
+ // +crypto, -algorithm
+ // And if "-algorithm" does not occur, we enable that crypto algorithm.
+ const bool HasSM4 = (std::find(ItBegin, ItEnd, "-sm4") == ItEnd);
+ const bool HasSHA3 = (std::find(ItBegin, ItEnd, "-sha3") == ItEnd);
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
+ if (HasSM4)
+ Features.push_back("+sm4");
+ if (HasSHA3)
+ Features.push_back("+sha3");
+ if (HasSHA2)
+ Features.push_back("+sha2");
+ if (HasAES)
+ Features.push_back("+aes");
+ } else if (HasNoCrypto) {
+ // Check if we have NOT enabled a crypto algorithm with something like:
+ // -crypto, +algorithm
+ // And if "+algorithm" does not occur, we disable that crypto algorithm.
+ const bool HasSM4 = (std::find(ItBegin, ItEnd, "+sm4") != ItEnd);
+ const bool HasSHA3 = (std::find(ItBegin, ItEnd, "+sha3") != ItEnd);
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
+ if (!HasSM4)
+ Features.push_back("-sm4");
+ if (!HasSHA3)
+ Features.push_back("-sha3");
+ if (!HasSHA2)
+ Features.push_back("-sha2");
+ if (!HasAES)
+ Features.push_back("-aes");
+ }
+ } else {
+ if (HasCrypto && !NoCrypto) {
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
+ if (HasSHA2)
+ Features.push_back("+sha2");
+ if (HasAES)
+ Features.push_back("+aes");
+ } else if (HasNoCrypto) {
+ const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
+ const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
+ const bool HasV82a = (std::find(ItBegin, ItEnd, "+v8.2a") != ItEnd);
+ const bool HasV83a = (std::find(ItBegin, ItEnd, "+v8.3a") != ItEnd);
+ const bool HasV84a = (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd);
+ if (!HasSHA2)
+ Features.push_back("-sha2");
+ if (!HasAES)
+ Features.push_back("-aes");
+ if (HasV82a || HasV83a || HasV84a) {
+ Features.push_back("-sm4");
+ Features.push_back("-sha3");
+ }
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access))
if (A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
+ if (Args.hasArg(options::OPT_ffixed_x1))
+ Features.push_back("+reserve-x1");
+
+ if (Args.hasArg(options::OPT_ffixed_x2))
+ Features.push_back("+reserve-x2");
+
+ if (Args.hasArg(options::OPT_ffixed_x3))
+ Features.push_back("+reserve-x3");
+
+ if (Args.hasArg(options::OPT_ffixed_x4))
+ Features.push_back("+reserve-x4");
+
+ if (Args.hasArg(options::OPT_ffixed_x5))
+ Features.push_back("+reserve-x5");
+
+ if (Args.hasArg(options::OPT_ffixed_x6))
+ Features.push_back("+reserve-x6");
+
+ if (Args.hasArg(options::OPT_ffixed_x7))
+ Features.push_back("+reserve-x7");
+
if (Args.hasArg(options::OPT_ffixed_x18))
Features.push_back("+reserve-x18");
if (Args.hasArg(options::OPT_ffixed_x20))
Features.push_back("+reserve-x20");
+ if (Args.hasArg(options::OPT_fcall_saved_x8))
+ Features.push_back("+call-saved-x8");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x9))
+ Features.push_back("+call-saved-x9");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x10))
+ Features.push_back("+call-saved-x10");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x11))
+ Features.push_back("+call-saved-x11");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x12))
+ Features.push_back("+call-saved-x12");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x13))
+ Features.push_back("+call-saved-x13");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x14))
+ Features.push_back("+call-saved-x14");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x15))
+ Features.push_back("+call-saved-x15");
+
+ if (Args.hasArg(options::OPT_fcall_saved_x18))
+ Features.push_back("+call-saved-x18");
+
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
}
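Worked examples of the fix-ups above, relying on the driver's last-occurrence-wins handling of the target-feature list (the entries on the left are what the feature vector already contains, the right-hand side is what this code appends):

    +v8.4a +fullfp16 (no fp16fml modifier)   appends  +fp16fml
    +v8.4a +crypto                           appends  +sm4 +sha3 +sha2 +aes
    +v8.2a +crypto                           appends  +sha2 +aes
    +v8.2a -crypto                           appends  -sha2 -aes -sm4 -sha3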
diff --git a/lib/Driver/ToolChains/Arch/AArch64.h b/lib/Driver/ToolChains/Arch/AArch64.h
index 62e419cc19f7..5f6148ebd6c4 100644
--- a/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/lib/Driver/ToolChains/Arch/AArch64.h
@@ -21,11 +21,12 @@ namespace driver {
namespace tools {
namespace aarch64 {
-void getAArch64TargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
- llvm::opt::Arg *&A);
+ const llvm::Triple &Triple, llvm::opt::Arg *&A);
} // end namespace aarch64
} // end namespace target
diff --git a/lib/Driver/ToolChains/Arch/ARM.cpp b/lib/Driver/ToolChains/Arch/ARM.cpp
index 886d947c586b..f55efc1a22e3 100644
--- a/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -378,6 +378,13 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
Features);
} else if (FPUArg) {
getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ } else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
+ // Android mandates minimum FPU requirements based on OS version.
+ const char *AndroidFPU =
+ Triple.isAndroidVersionLT(23) ? "vfpv3-d16" : "neon";
+ if (!llvm::ARM::getFPUFeatures(llvm::ARM::parseFPU(AndroidFPU), Features))
+ D.Diag(clang::diag::err_drv_clang_unsupported)
+ << std::string("-mfpu=") + AndroidFPU;
}
// Honor -mhwdiv=. ClangAs gives preference to -Wa,-mhwdiv=.
@@ -391,6 +398,33 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
} else if (HDivArg)
getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
+ // Handle (arch-dependent) fp16fml/fullfp16 relationship.
+ // Must happen before any features are disabled due to soft-float.
+ // FIXME: this fp16fml option handling will be reimplemented after the
+ // TargetParser rewrite.
+ const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
+ const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
+ if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8_4a) {
+ const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
+ if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
+ // The only entangled feature that can appear to the right of this +fullfp16 is -fp16fml.
+ // Only append the +fp16fml if there is no -fp16fml after the +fullfp16.
+ if (std::find(Features.rbegin(), ItRFullFP16, "-fp16fml") == ItRFullFP16)
+ Features.push_back("+fp16fml");
+ }
+ else
+ goto fp16_fml_fallthrough;
+ }
+ else {
+fp16_fml_fallthrough:
+ // In both of these cases, putting the 'other' feature on the end of the vector will
+ // result in the same effect as placing it immediately after the current feature.
+ if (ItRNoFullFP16 < ItRFP16FML)
+ Features.push_back("-fp16fml");
+ else if (ItRNoFullFP16 > ItRFP16FML)
+ Features.push_back("+fullfp16");
+ }
+
// Setting -msoft-float/-mfloat-abi=soft effectively disables the FPU (GCC
// ignores the -mfpu options in this case).
// Note that the ABI can also be set implicitly by the target selected.
@@ -404,7 +438,7 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
// now just be explicit and disable all known dependent features
// as well.
for (std::string Feature : {"vfp2", "vfp3", "vfp4", "fp-armv8", "fullfp16",
- "neon", "crypto", "dotprod"})
+ "neon", "crypto", "dotprod", "fp16fml"})
if (std::find(std::begin(Features), std::end(Features), "+" + Feature) != std::end(Features))
Features.push_back(Args.MakeArgString("-" + Feature));
}
@@ -417,6 +451,26 @@ void arm::getARMTargetFeatures(const ToolChain &TC,
Features.push_back("-crc");
}
+ // For Arch >= ARMv8.0: crypto = sha2 + aes
+ // FIXME: this needs reimplementation after the TargetParser rewrite
+ if (ArchName.find_lower("armv8a") != StringRef::npos ||
+ ArchName.find_lower("armv8.1a") != StringRef::npos ||
+ ArchName.find_lower("armv8.2a") != StringRef::npos ||
+ ArchName.find_lower("armv8.3a") != StringRef::npos ||
+ ArchName.find_lower("armv8.4a") != StringRef::npos) {
+ if (ArchName.find_lower("+crypto") != StringRef::npos) {
+ if (ArchName.find_lower("+nosha2") == StringRef::npos)
+ Features.push_back("+sha2");
+ if (ArchName.find_lower("+noaes") == StringRef::npos)
+ Features.push_back("+aes");
+ } else if (ArchName.find_lower("-crypto") != StringRef::npos) {
+ if (ArchName.find_lower("+sha2") == StringRef::npos)
+ Features.push_back("-sha2");
+ if (ArchName.find_lower("+aes") == StringRef::npos)
+ Features.push_back("-aes");
+ }
+ }
+
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
@@ -589,7 +643,7 @@ StringRef arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch,
return llvm::ARM::getSubArch(ArchKind);
}
-void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
+void arm::appendBE8LinkFlag(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple) {
if (Args.hasArg(options::OPT_r))
return;
diff --git a/lib/Driver/ToolChains/Arch/ARM.h b/lib/Driver/ToolChains/Arch/ARM.h
index c1dc16884033..9f0dc4ea2e25 100644
--- a/lib/Driver/ToolChains/Arch/ARM.h
+++ b/lib/Driver/ToolChains/Arch/ARM.h
@@ -29,7 +29,7 @@ StringRef getARMCPUForMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
StringRef getLLVMArchSuffixForARM(llvm::StringRef CPU, llvm::StringRef Arch,
const llvm::Triple &Triple);
-void appendEBLinkFlags(const llvm::opt::ArgList &Args,
+void appendBE8LinkFlag(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
const llvm::Triple &Triple);
enum class ReadTPMode {
diff --git a/lib/Driver/ToolChains/Arch/Mips.cpp b/lib/Driver/ToolChains/Arch/Mips.cpp
index 6d814631d05f..e10a5e1c773f 100644
--- a/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -35,6 +35,11 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
DefMips64CPU = "mips64r6";
}
+ if (Triple.getSubArch() == llvm::Triple::MipsSubArch_r6) {
+ DefMips32CPU = "mips32r6";
+ DefMips64CPU = "mips64r6";
+ }
+
// MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
if (Triple.isAndroid()) {
DefMips32CPU = "mips32";
@@ -42,12 +47,12 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
}
// MIPS3 is the default for mips64*-unknown-openbsd.
- if (Triple.getOS() == llvm::Triple::OpenBSD)
+ if (Triple.isOSOpenBSD())
DefMips64CPU = "mips3";
// MIPS2 is the default for mips(el)?-unknown-freebsd.
// MIPS3 is the default for mips64(el)?-unknown-freebsd.
- if (Triple.getOS() == llvm::Triple::FreeBSD) {
+ if (Triple.isOSFreeBSD()) {
DefMips32CPU = "mips2";
DefMips64CPU = "mips3";
}
@@ -82,6 +87,9 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
}
}
+ if (ABIName.empty() && (Triple.getEnvironment() == llvm::Triple::GNUABIN32))
+ ABIName = "n32";
+
if (ABIName.empty() &&
(Triple.getVendor() == llvm::Triple::MipsTechnologies ||
Triple.getVendor() == llvm::Triple::ImaginationTechnologies)) {
diff --git a/lib/Driver/ToolChains/Arch/PPC.cpp b/lib/Driver/ToolChains/Arch/PPC.cpp
index f6a95962ace3..791f1206cf25 100644
--- a/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -107,15 +107,19 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (FloatABI == ppc::FloatABI::Soft)
Features.push_back("-hard-float");
- ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Args);
+ ppc::ReadGOTPtrMode ReadGOT = ppc::getPPCReadGOTPtrMode(D, Triple, Args);
if (ReadGOT == ppc::ReadGOTPtrMode::SecurePlt)
Features.push_back("+secure-plt");
}
-ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const ArgList &Args) {
+ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
- return ppc::ReadGOTPtrMode::Bss;
+ if (Triple.isOSOpenBSD())
+ return ppc::ReadGOTPtrMode::SecurePlt;
+ else
+ return ppc::ReadGOTPtrMode::Bss;
}
ppc::FloatABI ppc::getPPCFloatABI(const Driver &D, const ArgList &Args) {
diff --git a/lib/Driver/ToolChains/Arch/PPC.h b/lib/Driver/ToolChains/Arch/PPC.h
index 3acee91a2ac3..4f3cd688ca39 100644
--- a/lib/Driver/ToolChains/Arch/PPC.h
+++ b/lib/Driver/ToolChains/Arch/PPC.h
@@ -38,7 +38,7 @@ FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
std::string getPPCTargetCPU(const llvm::opt::ArgList &Args);
const char *getPPCAsmModeForCPU(StringRef Name);
-ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D,
+ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/lib/Driver/ToolChains/Arch/X86.cpp b/lib/Driver/ToolChains/Arch/X86.cpp
index 7a4f836d2e1a..45648945d5ef 100644
--- a/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/lib/Driver/ToolChains/Arch/X86.cpp
@@ -23,12 +23,8 @@ using namespace llvm::opt;
const char *x86::getX86TargetCPU(const ArgList &Args,
const llvm::Triple &Triple) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
- if (StringRef(A->getValue()) != "native") {
- if (Triple.isOSDarwin() && Triple.getArchName() == "x86_64h")
- return "core-avx2";
-
+ if (StringRef(A->getValue()) != "native")
return A->getValue();
- }
// FIXME: Reject attempts to use -march=native unless the target matches
// the host.
@@ -144,6 +140,34 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
Features.push_back("+ssse3");
}
+ // Translate the high level `-mretpoline` flag to the specific target feature
+ // flags. We also detect if the user asked for retpoline external thunks but
+ // failed to ask for retpolines themselves (through any of the different
+ // flags). This is a bit hacky but keeps existing usages working. We should
+ // consider deprecating this and instead warn if the user requests external
+ // retpoline thunks and *doesn't* request some form of retpolines.
+ if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline,
+ options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening)) {
+ if (Args.hasFlag(options::OPT_mretpoline, options::OPT_mno_retpoline,
+ false)) {
+ Features.push_back("+retpoline-indirect-calls");
+ Features.push_back("+retpoline-indirect-branches");
+ } else if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening,
+ false)) {
+ // On x86, speculative load hardening relies on at least using retpolines
+ // for indirect calls.
+ Features.push_back("+retpoline-indirect-calls");
+ }
+ } else if (Args.hasFlag(options::OPT_mretpoline_external_thunk,
+ options::OPT_mno_retpoline_external_thunk, false)) {
+ // FIXME: Add a warning about failing to specify `-mretpoline` and
+ // eventually switch to an error here.
+ Features.push_back("+retpoline-indirect-calls");
+ Features.push_back("+retpoline-indirect-branches");
+ }
+
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
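In short, the translation added above produces:

    -mretpoline                      ->  +retpoline-indirect-calls +retpoline-indirect-branches
    -mspeculative-load-hardening     ->  +retpoline-indirect-calls
    -mretpoline-external-thunk alone ->  +retpoline-indirect-calls +retpoline-indirect-branches (legacy spelling kept working)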
diff --git a/lib/Driver/ToolChains/BareMetal.cpp b/lib/Driver/ToolChains/BareMetal.cpp
index c302d647b973..31d16922cc43 100644
--- a/lib/Driver/ToolChains/BareMetal.cpp
+++ b/lib/Driver/ToolChains/BareMetal.cpp
@@ -13,13 +13,13 @@
#include "InputInfo.h"
#include "Gnu.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm::opt;
@@ -119,10 +119,11 @@ void BareMetal::AddClangCXXStdlibIncludeArgs(
std::error_code EC;
Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
// Walk the subdirs, and find the one with the newest gcc version:
- for (vfs::directory_iterator LI =
- getDriver().getVFS().dir_begin(Dir.str(), EC), LE;
+ for (llvm::vfs::directory_iterator
+ LI = getDriver().getVFS().dir_begin(Dir.str(), EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
if (CandidateVersion.Major == -1)
continue;
diff --git a/lib/Driver/ToolChains/Clang.cpp b/lib/Driver/ToolChains/Clang.cpp
index 8e9c4c6aecb8..75f16898dfaf 100644
--- a/lib/Driver/ToolChains/Clang.cpp
+++ b/lib/Driver/ToolChains/Clang.cpp
@@ -19,12 +19,14 @@
#include "AMDGPU.h"
#include "CommonArgs.h"
#include "Hexagon.h"
+#include "MSP430.h"
#include "InputInfo.h"
#include "PS4CPU.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
+#include "clang/Driver/Distro.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
@@ -341,7 +343,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Args, Features);
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -363,6 +365,8 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::amdgcn:
amdgpu::getAMDGPUTargetFeatures(D, Args, Features);
break;
+ case llvm::Triple::msp430:
+ msp430::getMSP430TargetFeatures(D, Args, Features);
}
// Find the last of each feature.
@@ -493,6 +497,8 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
if (A.getOption().matches(options::OPT_gline_tables_only) ||
A.getOption().matches(options::OPT_ggdb1))
return codegenoptions::DebugLineTablesOnly;
+ if (A.getOption().matches(options::OPT_gline_directives_only))
+ return codegenoptions::DebugDirectivesOnly;
return codegenoptions::LimitedDebugInfo;
}
@@ -524,11 +530,12 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
break;
}
- if (Triple.getOS() == llvm::Triple::NetBSD) {
+ if (Triple.isOSNetBSD()) {
return !areOptimizationsEnabled(Args);
}
- if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI) {
+ if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI ||
+ Triple.isOSHurd()) {
switch (Triple.getArch()) {
// Don't use a frame pointer on linux if optimizing for certain targets.
case llvm::Triple::mips64:
@@ -800,6 +807,29 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
CmdArgs.push_back("-fcoverage-mapping");
}
+ if (Args.hasArg(options::OPT_fprofile_exclude_files_EQ)) {
+ auto *Arg = Args.getLastArg(options::OPT_fprofile_exclude_files_EQ);
+ if (!Args.hasArg(options::OPT_coverage))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fprofile-exclude-files="
+ << "--coverage";
+
+ StringRef v = Arg->getValue();
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fprofile-exclude-files=" + v)));
+ }
+
+ if (Args.hasArg(options::OPT_fprofile_filter_files_EQ)) {
+ auto *Arg = Args.getLastArg(options::OPT_fprofile_filter_files_EQ);
+ if (!Args.hasArg(options::OPT_coverage))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fprofile-filter-files="
+ << "--coverage";
+
+ StringRef v = Arg->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fprofile-filter-files=" + v)));
+ }
+
if (C.getArgs().hasArg(options::OPT_c) ||
C.getArgs().hasArg(options::OPT_S)) {
if (Output.isFilename()) {
@@ -889,6 +919,9 @@ static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
unsigned DwarfVersion,
llvm::DebuggerKind DebuggerTuning) {
switch (DebugInfoKind) {
+ case codegenoptions::DebugDirectivesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-directives-only");
+ break;
case codegenoptions::DebugLineTablesOnly:
CmdArgs.push_back("-debug-info-kind=line-tables-only");
break;
@@ -1100,10 +1133,19 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
StringRef ThroughHeader = YcArg ? YcArg->getValue() : YuArg->getValue();
if (!isa<PrecompileJobAction>(JA)) {
CmdArgs.push_back("-include-pch");
- CmdArgs.push_back(Args.MakeArgString(D.GetClPchPath(C, ThroughHeader)));
+ CmdArgs.push_back(Args.MakeArgString(D.GetClPchPath(
+ C, !ThroughHeader.empty()
+ ? ThroughHeader
+ : llvm::sys::path::filename(Inputs[0].getBaseInput()))));
+ }
+
+ if (ThroughHeader.empty()) {
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-pch-through-hdrstop-") + (YcArg ? "create" : "use")));
+ } else {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-pch-through-header=") + ThroughHeader));
}
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-pch-through-header=") + ThroughHeader));
}
}
@@ -1114,42 +1156,26 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
bool IsFirstImplicitInclude = !RenderedImplicitInclude;
RenderedImplicitInclude = true;
- // Use PCH if the user requested it.
- bool UsePCH = D.CCCUsePCH;
-
- bool FoundPTH = false;
bool FoundPCH = false;
SmallString<128> P(A->getValue());
// We want the files to have a name like foo.h.pch. Add a dummy extension
// so that replace_extension does the right thing.
P += ".dummy";
- if (UsePCH) {
- llvm::sys::path::replace_extension(P, "pch");
- if (llvm::sys::fs::exists(P))
- FoundPCH = true;
- }
+ llvm::sys::path::replace_extension(P, "pch");
+ if (llvm::sys::fs::exists(P))
+ FoundPCH = true;
if (!FoundPCH) {
- llvm::sys::path::replace_extension(P, "pth");
- if (llvm::sys::fs::exists(P))
- FoundPTH = true;
- }
-
- if (!FoundPCH && !FoundPTH) {
llvm::sys::path::replace_extension(P, "gch");
if (llvm::sys::fs::exists(P)) {
- FoundPCH = UsePCH;
- FoundPTH = !UsePCH;
+ FoundPCH = true;
}
}
- if (FoundPCH || FoundPTH) {
+ if (FoundPCH) {
if (IsFirstImplicitInclude) {
A->claim();
- if (UsePCH)
- CmdArgs.push_back("-include-pch");
- else
- CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back("-include-pch");
CmdArgs.push_back(Args.MakeArgString(P));
continue;
} else {
@@ -1283,21 +1309,28 @@ static bool isNoCommonDefault(const llvm::Triple &Triple) {
}
}
-void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
- ArgStringList &CmdArgs, bool KernelOrKext) const {
+namespace {
+void RenderARMABI(const llvm::Triple &Triple, const ArgList &Args,
+ ArgStringList &CmdArgs) {
// Select the ABI to use.
// FIXME: Support -meabi.
// FIXME: Parts of this are duplicated in the backend, unify this somehow.
const char *ABIName = nullptr;
- if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
ABIName = A->getValue();
- else {
+ } else {
std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
ABIName = llvm::ARM::computeDefaultTargetABI(Triple, CPU).data();
}
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
+}
+}
+
+void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
+ ArgStringList &CmdArgs, bool KernelOrKext) const {
+ RenderARMABI(Triple, Args, CmdArgs);
// Determine floating point ABI from the options & target defaults.
arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
@@ -1409,19 +1442,59 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
}
}
-void Clang::AddAArch64TargetArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+// Parse -mbranch-protection=<protection>[+<protection>]* where
+// <protection> ::= standard | none | [bti,pac-ret[+b-key,+leaf]*]
+// Returns a triple of (return address signing Scope, signing key, require
+// landing pads)
+static std::tuple<StringRef, StringRef, bool>
+ParseAArch64BranchProtection(const Driver &D, const ArgList &Args,
+ const Arg *A) {
+ StringRef Scope = "none";
+ StringRef Key = "a_key";
+ bool IndirectBranches = false;
- if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
- Args.hasArg(options::OPT_mkernel) ||
- Args.hasArg(options::OPT_fapple_kext))
- CmdArgs.push_back("-disable-red-zone");
+ StringRef Value = A->getValue();
+ // This maps onto -mbranch-protection=<scope>+<key>
+
+ if (Value.equals("standard")) {
+ Scope = "non-leaf";
+ Key = "a_key";
+ IndirectBranches = true;
+
+ } else if (!Value.equals("none")) {
+ SmallVector<StringRef, 4> BranchProtection;
+ StringRef(A->getValue()).split(BranchProtection, '+');
+
+ auto Protection = BranchProtection.begin();
+ while (Protection != BranchProtection.end()) {
+ if (Protection->equals("bti"))
+ IndirectBranches = true;
+ else if (Protection->equals("pac-ret")) {
+ Scope = "non-leaf";
+ while (++Protection != BranchProtection.end()) {
+ // Inner loop, as the "leaf" and "b-key" options must only appear attached
+ // to pac-ret.
+ if (Protection->equals("leaf"))
+ Scope = "all";
+ else if (Protection->equals("b-key"))
+ Key = "b_key";
+ else
+ break;
+ }
+ Protection--;
+ } else
+ D.Diag(diag::err_invalid_branch_protection)
+ << *Protection << A->getAsString(Args);
+ Protection++;
+ }
+ }
- if (!Args.hasFlag(options::OPT_mimplicit_float,
- options::OPT_mno_implicit_float, true))
- CmdArgs.push_back("-no-implicit-float");
+ return std::make_tuple(Scope, Key, IndirectBranches);
+}
+namespace {
+void RenderAArch64ABI(const llvm::Triple &Triple, const ArgList &Args,
+ ArgStringList &CmdArgs) {
const char *ABIName = nullptr;
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
ABIName = A->getValue();
@@ -1432,6 +1505,23 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
+}
+}
+
+void Clang::AddAArch64TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+
+ if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
+ Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-disable-red-zone");
+
+ if (!Args.hasFlag(options::OPT_mimplicit_float,
+ options::OPT_mno_implicit_float, true))
+ CmdArgs.push_back("-no-implicit-float");
+
+ RenderAArch64ABI(Triple, Args, CmdArgs);
if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
options::OPT_mno_fix_cortex_a53_835769)) {
@@ -1455,6 +1545,35 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
else
CmdArgs.push_back("-aarch64-enable-global-merge=true");
}
+
+ // Enable/disable return address signing and indirect branch targets.
+ if (Arg *A = Args.getLastArg(options::OPT_msign_return_address_EQ,
+ options::OPT_mbranch_protection_EQ)) {
+
+ const Driver &D = getToolChain().getDriver();
+
+ StringRef Scope, Key;
+ bool IndirectBranches;
+
+ if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
+ Scope = A->getValue();
+ if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
+ !Scope.equals("all"))
+ D.Diag(diag::err_invalid_branch_protection)
+ << Scope << A->getAsString(Args);
+ Key = "a_key";
+ IndirectBranches = false;
+ } else
+ std::tie(Scope, Key, IndirectBranches) =
+ ParseAArch64BranchProtection(D, Args, A);
+
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-msign-return-address=") + Scope));
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
+ if (IndirectBranches)
+ CmdArgs.push_back("-mbranch-target-enforce");
+ }
}
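For reference, a few mappings implied by ParseAArch64BranchProtection and the rendering code above:

    -msign-return-address=non-leaf          ->  -msign-return-address=non-leaf -msign-return-address-key=a_key
    -mbranch-protection=standard            ->  -msign-return-address=non-leaf -msign-return-address-key=a_key -mbranch-target-enforce
    -mbranch-protection=pac-ret+leaf+b-key  ->  -msign-return-address=all -msign-return-address-key=b_key
    -mbranch-protection=bti                 ->  -msign-return-address=none -msign-return-address-key=a_key -mbranch-target-enforce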
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@@ -1706,6 +1825,10 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
Args.hasArg(options::OPT_fapple_kext))
CmdArgs.push_back("-disable-red-zone");
+ if (!Args.hasFlag(options::OPT_mtls_direct_seg_refs,
+ options::OPT_mno_tls_direct_seg_refs, true))
+ CmdArgs.push_back("-mno-tls-direct-seg-refs");
+
// Default to avoid implicit floating-point for kernel/kext code, but allow
// that to be overridden with -mno-soft-float.
bool NoImplicitFloat = (Args.hasArg(options::OPT_mkernel) ||
@@ -2033,6 +2156,9 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
}
CmdArgs.push_back(Value.data());
TakeNextArg = true;
+ } else if (Value == "-fdebug-compilation-dir") {
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ TakeNextArg = true;
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
@@ -2045,6 +2171,11 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back("-target-feature");
CmdArgs.push_back(MipsTargetFeature);
}
+
+ // Forward -fembed-bitcode to the assembler.
+ if (C.getDriver().embedBitcodeEnabled() ||
+ C.getDriver().embedBitcodeMarkerOnly())
+ Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
}
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
@@ -2066,6 +2197,11 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef DenormalFPMath = "";
StringRef FPContract = "";
+ if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
+ CmdArgs.push_back("-mlimit-float-precision");
+ CmdArgs.push_back(A->getValue());
+ }
+
for (const Arg *A : Args) {
switch (A->getOption().getID()) {
// If this isn't an FP option skip the claim below
@@ -2228,8 +2364,6 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
// Treat blocks as analysis entry points.
CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
- CmdArgs.push_back("-analyzer-eagerly-assume");
-
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
@@ -2339,6 +2473,50 @@ static void RenderSSPOptions(const ToolChain &TC, const ArgList &Args,
}
}
+static void RenderTrivialAutoVarInitOptions(const Driver &D,
+ const ToolChain &TC,
+ const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ auto DefaultTrivialAutoVarInit = TC.GetDefaultTrivialAutoVarInit();
+ StringRef TrivialAutoVarInit = "";
+
+ for (const Arg *A : Args) {
+ switch (A->getOption().getID()) {
+ default:
+ continue;
+ case options::OPT_ftrivial_auto_var_init: {
+ A->claim();
+ StringRef Val = A->getValue();
+ if (Val == "uninitialized" || Val == "zero" || Val == "pattern")
+ TrivialAutoVarInit = Val;
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ break;
+ }
+ }
+ }
+
+ if (TrivialAutoVarInit.empty())
+ switch (DefaultTrivialAutoVarInit) {
+ case LangOptions::TrivialAutoVarInitKind::Uninitialized:
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Pattern:
+ TrivialAutoVarInit = "pattern";
+ break;
+ case LangOptions::TrivialAutoVarInitKind::Zero:
+ TrivialAutoVarInit = "zero";
+ break;
+ }
+
+ if (!TrivialAutoVarInit.empty()) {
+ if (TrivialAutoVarInit == "zero" && !Args.hasArg(options::OPT_enable_trivial_var_init_zero))
+ D.Diag(diag::err_drv_trivial_auto_var_init_zero_disabled);
+ CmdArgs.push_back(
+ Args.MakeArgString("-ftrivial-auto-var-init=" + TrivialAutoVarInit));
+ }
+}
+
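In short (all names as they appear in the hunk above): -ftrivial-auto-var-init=pattern is forwarded to the frontend unchanged; =zero additionally requires options::OPT_enable_trivial_var_init_zero and otherwise triggers err_drv_trivial_auto_var_init_zero_disabled; with no flag at all, the value falls back to whatever TC.GetDefaultTrivialAutoVarInit() reports.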
static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
const unsigned ForwardedArguments[] = {
options::OPT_cl_opt_disable,
@@ -2658,8 +2836,10 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
CmdArgs.push_back("-fno-signed-char");
}
- if (Args.hasFlag(options::OPT_fchar8__t, options::OPT_fno_char8__t, false))
- CmdArgs.push_back("-fchar8_t");
+ // The default depends on the language standard.
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fchar8__t, options::OPT_fno_char8__t))
+ A->render(Args, CmdArgs);
if (const Arg *A = Args.getLastArg(options::OPT_fshort_wchar,
options::OPT_fno_short_wchar)) {
@@ -2669,8 +2849,8 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
} else {
bool IsARM = T.isARM() || T.isThumb() || T.isAArch64();
CmdArgs.push_back("-fwchar-type=int");
- if (IsARM && !(T.isOSWindows() || T.getOS() == llvm::Triple::NetBSD ||
- T.getOS() == llvm::Triple::OpenBSD))
+ if (IsARM && !(T.isOSWindows() || T.isOSNetBSD() ||
+ T.isOSOpenBSD()))
CmdArgs.push_back("-fno-signed-wchar");
else
CmdArgs.push_back("-fsigned-wchar");
@@ -2701,7 +2881,6 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
// When the Objective-C legacy runtime is in effect on macOS, turn on the option
// to do Array/Dictionary subscripting by default.
if (Arch == llvm::Triple::x86 && T.isMacOSX() &&
- !T.isMacOSXVersionLT(10, 7) &&
Runtime.getKind() == ObjCRuntime::FragileMacOSX && Runtime.isNeXTFamily())
CmdArgs.push_back("-fobjc-subscripting-legacy-runtime");
@@ -2737,6 +2916,18 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
Args.ClaimAllArgs(options::OPT_fno_objc_arc_exceptions);
}
+ // Allow the user to control whether messages can be converted to runtime
+ // functions.
+ if (types::isObjC(Input.getType())) {
+ auto *Arg = Args.getLastArg(
+ options::OPT_fobjc_convert_messages_to_runtime_calls,
+ options::OPT_fno_objc_convert_messages_to_runtime_calls);
+ if (Arg &&
+ Arg->getOption().matches(
+ options::OPT_fno_objc_convert_messages_to_runtime_calls))
+ CmdArgs.push_back("-fno-objc-convert-messages-to-runtime-calls");
+ }
+
// -fobjc-infer-related-result-type is the default, except in the Objective-C
// rewriter.
if (InferCovariantReturns)
@@ -2872,12 +3063,35 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back("-fno-spell-checking");
}
+enum class DwarfFissionKind { None, Split, Single };
+
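+// A sketch of how these values are selected from the command line (drawn from
+// the parsing in getDebugFissionKind below): plain -gsplit-dwarf and
+// -gsplit-dwarf=split both map to DwarfFissionKind::Split,
+// -gsplit-dwarf=single maps to DwarfFissionKind::Single, any other value is
+// diagnosed with err_drv_unsupported_option_argument, and no flag at all means
+// DwarfFissionKind::None.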
+static DwarfFissionKind getDebugFissionKind(const Driver &D,
+ const ArgList &Args, Arg *&Arg) {
+ Arg =
+ Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ);
+ if (!Arg)
+ return DwarfFissionKind::None;
+
+ if (Arg->getOption().matches(options::OPT_gsplit_dwarf))
+ return DwarfFissionKind::Split;
+
+ StringRef Value = Arg->getValue();
+ if (Value == "split")
+ return DwarfFissionKind::Split;
+ if (Value == "single")
+ return DwarfFissionKind::Single;
+
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << Arg->getValue();
+ return DwarfFissionKind::None;
+}
+
static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
bool EmitCodeView, bool IsWindowsMSVC,
ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
- const Arg *&SplitDWARFArg) {
+ DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -2902,10 +3116,12 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
Args.ClaimAllArgs(options::OPT_g_Group);
- SplitDWARFArg = Args.getLastArg(options::OPT_gsplit_dwarf);
+ Arg *SplitDWARFArg;
+ DwarfFission = getDebugFissionKind(D, Args, SplitDWARFArg);
- if (SplitDWARFArg && !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
- SplitDWARFArg = nullptr;
+ if (DwarfFission != DwarfFissionKind::None &&
+ !checkDebugInfoOption(SplitDWARFArg, Args, D, TC)) {
+ DwarfFission = DwarfFissionKind::None;
SplitDWARFInlining = false;
}
@@ -2922,12 +3138,13 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// composing split-dwarf and line-tables-only, so let those compose
// naturally in that case. And if you just turned off debug info,
// (-gsplit-dwarf -g0) - do that.
- if (SplitDWARFArg) {
+ if (DwarfFission != DwarfFissionKind::None) {
if (A->getIndex() > SplitDWARFArg->getIndex()) {
if (DebugInfoKind == codegenoptions::NoDebugInfo ||
+ DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
(DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
SplitDWARFInlining))
- SplitDWARFArg = nullptr;
+ DwarfFission = DwarfFissionKind::None;
} else if (SplitDWARFInlining)
DebugInfoKind = codegenoptions::NoDebugInfo;
}
@@ -2960,21 +3177,28 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC))
DWARFVersion = DwarfVersionNum(A->getSpelling());
- // Forward -gcodeview. EmitCodeView might have been set by CL-compatibility
- // argument parsing.
- if (EmitCodeView) {
- if (const Arg *A = Args.getLastArg(options::OPT_gcodeview)) {
- EmitCodeView = checkDebugInfoOption(A, Args, D, TC);
- if (EmitCodeView) {
- // DWARFVersion remains at 0 if no explicit choice was made.
- CmdArgs.push_back("-gcodeview");
- }
- }
+ if (const Arg *A = Args.getLastArg(options::OPT_gcodeview)) {
+ if (checkDebugInfoOption(A, Args, D, TC))
+ EmitCodeView = true;
}
+ // If the user asked for debug info but did not explicitly specify -gcodeview
+ // or -gdwarf, ask the toolchain for the default format.
if (!EmitCodeView && DWARFVersion == 0 &&
- DebugInfoKind != codegenoptions::NoDebugInfo)
- DWARFVersion = TC.GetDefaultDwarfVersion();
+ DebugInfoKind != codegenoptions::NoDebugInfo) {
+ switch (TC.getDefaultDebugFormat()) {
+ case codegenoptions::DIF_CodeView:
+ EmitCodeView = true;
+ break;
+ case codegenoptions::DIF_DWARF:
+ DWARFVersion = TC.GetDefaultDwarfVersion();
+ break;
+ }
+ }
+
+ // -gline-directives-only is supported only for DWARF debug info.
+ if (DWARFVersion == 0 && DebugInfoKind == codegenoptions::DebugDirectivesOnly)
+ DebugInfoKind = codegenoptions::NoDebugInfo;
// We ignore flag -gstrict-dwarf for now.
// And we handle flag -grecord-gcc-switches later with DWARFDebugFlags.
@@ -2993,10 +3217,11 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-dwarf-column-info");
// FIXME: Move backend command line options to the module.
- // If -gline-tables-only is the last option it wins.
+ // If -gline-tables-only or -gline-directives-only is the last option it wins.
if (const Arg *A = Args.getLastArg(options::OPT_gmodules))
if (checkDebugInfoOption(A, Args, D, TC)) {
- if (DebugInfoKind != codegenoptions::DebugLineTablesOnly) {
+ if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
+ DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
DebugInfoKind = codegenoptions::LimitedDebugInfo;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
@@ -3005,15 +3230,19 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
// -gsplit-dwarf should turn on -g and enable the backend dwarf
// splitting and extraction.
- // FIXME: Currently only works on Linux.
- if (T.isOSLinux()) {
+ // FIXME: Currently only works on Linux and Fuchsia.
+ if (T.isOSLinux() || T.isOSFuchsia()) {
if (!SplitDWARFInlining)
CmdArgs.push_back("-fno-split-dwarf-inlining");
- if (SplitDWARFArg) {
+ if (DwarfFission != DwarfFissionKind::None) {
if (DebugInfoKind == codegenoptions::NoDebugInfo)
DebugInfoKind = codegenoptions::LimitedDebugInfo;
- CmdArgs.push_back("-enable-split-dwarf");
+
+ if (DwarfFission == DwarfFissionKind::Single)
+ CmdArgs.push_back("-enable-split-dwarf=single");
+ else
+ CmdArgs.push_back("-enable-split-dwarf");
}
}
@@ -3044,6 +3273,19 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-gembed-source");
}
+ if (EmitCodeView) {
+ CmdArgs.push_back("-gcodeview");
+
+ // Emit codeview type hashes if requested.
+ if (Args.hasFlag(options::OPT_gcodeview_ghash,
+ options::OPT_gno_codeview_ghash, false)) {
+ CmdArgs.push_back("-gcodeview-ghash");
+ }
+ }
+
+ // Adjust the debug info kind for the given toolchain.
+ TC.adjustDebugInfoKind(DebugInfoKind, Args);
+
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DWARFVersion,
DebuggerTuning);
@@ -3055,11 +3297,24 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-debug-info-macro");
// -ggnu-pubnames turns on gnu style pubnames in the backend.
- if (Args.hasFlag(options::OPT_ggnu_pubnames, options::OPT_gno_gnu_pubnames,
- false))
- if (checkDebugInfoOption(Args.getLastArg(options::OPT_ggnu_pubnames), Args,
- D, TC))
- CmdArgs.push_back("-ggnu-pubnames");
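+ // Rough summary of the logic below (illustrative, not exhaustive): pubnames
+ // are emitted when split DWARF is enabled or the debugger tuning is LLDB, or
+ // when an explicit -gpubnames/-ggnu-pubnames survives checkDebugInfoOption;
+ // -gno-pubnames/-gno-gnu-pubnames suppress them, and an explicit -gpubnames
+ // selects "-gpubnames" instead of the default "-ggnu-pubnames".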
+ const auto *PubnamesArg =
+ Args.getLastArg(options::OPT_ggnu_pubnames, options::OPT_gno_gnu_pubnames,
+ options::OPT_gpubnames, options::OPT_gno_pubnames);
+ if (DwarfFission != DwarfFissionKind::None ||
+ DebuggerTuning == llvm::DebuggerKind::LLDB ||
+ (PubnamesArg && checkDebugInfoOption(PubnamesArg, Args, D, TC)))
+ if (!PubnamesArg ||
+ (!PubnamesArg->getOption().matches(options::OPT_gno_gnu_pubnames) &&
+ !PubnamesArg->getOption().matches(options::OPT_gno_pubnames)))
+ CmdArgs.push_back(PubnamesArg && PubnamesArg->getOption().matches(
+ options::OPT_gpubnames)
+ ? "-gpubnames"
+ : "-ggnu-pubnames");
+
+ if (Args.hasFlag(options::OPT_fdebug_ranges_base_address,
+ options::OPT_fno_debug_ranges_base_address, false)) {
+ CmdArgs.push_back("-fdebug-ranges-base-address");
+ }
// -gdwarf-aranges turns on the emission of the aranges section in the
// backend.
@@ -3103,32 +3358,64 @@ static void RenderDebugOptions(const ToolChain &TC, const Driver &D,
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
- const llvm::Triple &RawTriple = getToolChain().getTriple();
- const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+ const auto &TC = getToolChain();
+ const llvm::Triple &RawTriple = TC.getTriple();
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
const std::string &TripleStr = Triple.getTriple();
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
- const Driver &D = getToolChain().getDriver();
+ const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
// Check number of inputs for sanity. We need at least one input.
assert(Inputs.size() >= 1 && "Must have at least one input.");
- const InputInfo &Input = Inputs[0];
// CUDA/HIP compilation may have multiple inputs (source file + results of
// device-side compilations). OpenMP device jobs also take the host IR as a
- // second input. All other jobs are expected to have exactly one
- // input.
+ // second input. Module precompilation accepts a list of header files to
+ // include as part of the module. All other jobs are expected to have exactly
+ // one input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
- assert((IsCuda || IsHIP || (IsOpenMPDevice && Inputs.size() == 2) ||
- Inputs.size() == 1) &&
- "Unable to handle multiple inputs.");
-
- const llvm::Triple *AuxTriple =
- IsCuda ? getToolChain().getAuxTriple() : nullptr;
+ bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
+
+ // A header module compilation doesn't have a main input file, so invent a
+ // fake one as a placeholder.
+ const char *ModuleName = [&]{
+ auto *ModuleNameArg = Args.getLastArg(options::OPT_fmodule_name_EQ);
+ return ModuleNameArg ? ModuleNameArg->getValue() : "";
+ }();
+ InputInfo HeaderModuleInput(Inputs[0].getType(), ModuleName, ModuleName);
+
+ const InputInfo &Input =
+ IsHeaderModulePrecompile ? HeaderModuleInput : Inputs[0];
+
+ InputInfoList ModuleHeaderInputs;
+ const InputInfo *CudaDeviceInput = nullptr;
+ const InputInfo *OpenMPDeviceInput = nullptr;
+ for (const InputInfo &I : Inputs) {
+ if (&I == &Input) {
+ // This is the primary input.
+ } else if (IsHeaderModulePrecompile &&
+ types::getPrecompiledType(I.getType()) == types::TY_PCH) {
+ types::ID Expected = HeaderModuleInput.getType();
+ if (I.getType() != Expected) {
+ D.Diag(diag::err_drv_module_header_wrong_kind)
+ << I.getFilename() << types::getTypeName(I.getType())
+ << types::getTypeName(Expected);
+ }
+ ModuleHeaderInputs.push_back(I);
+ } else if ((IsCuda || IsHIP) && !CudaDeviceInput) {
+ CudaDeviceInput = &I;
+ } else if (IsOpenMPDevice && !OpenMPDeviceInput) {
+ OpenMPDeviceInput = &I;
+ } else {
+ llvm_unreachable("unexpectedly given multiple inputs");
+ }
+ }
+ const llvm::Triple *AuxTriple = IsCuda ? TC.getAuxTriple() : nullptr;
bool IsWindowsGNU = RawTriple.isWindowsGNUEnvironment();
bool IsWindowsCygnus = RawTriple.isWindowsCygwinEnvironment();
bool IsWindowsMSVC = RawTriple.isWindowsMSVCEnvironment();
@@ -3204,7 +3491,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Push all default warning arguments that are specific to
// the given target. These come before user provided warning options
// are provided.
- getToolChain().addClangWarningOptions(CmdArgs);
+ TC.addClangWarningOptions(CmdArgs);
// Select the appropriate action.
RewriteKind rewriteKind = RK_None;
@@ -3231,17 +3518,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Also ignore explicit -force_cpusubtype_ALL option.
(void)Args.hasArg(options::OPT_force__cpusubtype__ALL);
} else if (isa<PrecompileJobAction>(JA)) {
- // Use PCH if the user requested it.
- bool UsePCH = D.CCCUsePCH;
-
if (JA.getType() == types::TY_Nothing)
CmdArgs.push_back("-fsyntax-only");
else if (JA.getType() == types::TY_ModuleFile)
- CmdArgs.push_back("-emit-module-interface");
- else if (UsePCH)
- CmdArgs.push_back("-emit-pch");
+ CmdArgs.push_back(IsHeaderModulePrecompile
+ ? "-emit-header-module"
+ : "-emit-module-interface");
else
- CmdArgs.push_back("-emit-pth");
+ CmdArgs.push_back("-emit-pch");
} else if (isa<VerifyPCHJobAction>(JA)) {
CmdArgs.push_back("-verify-pch");
} else {
@@ -3305,13 +3589,116 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_save_temps_EQ);
// Embed-bitcode option.
+ // Only white-listed flags below are allowed to be embedded.
if (C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO() &&
(isa<BackendJobAction>(JA) || isa<AssembleJobAction>(JA))) {
// Add flags implied by -fembed-bitcode.
Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
// Disable all llvm IR level optimizations.
CmdArgs.push_back("-disable-llvm-passes");
+
+ // Reject options that shouldn't be supported in bitcode; also reject
+ // kernel/kext.
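+ // For instance (a hypothetical invocation, not from this change):
+ // "clang -fembed-bitcode -ffunction-sections -c foo.c" would hit the
+ // blacklist below and emit err_drv_unsupported_embed_bitcode for
+ // -ffunction-sections.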
+ static const constexpr unsigned kBitcodeOptionBlacklist[] = {
+ options::OPT_mkernel,
+ options::OPT_fapple_kext,
+ options::OPT_ffunction_sections,
+ options::OPT_fno_function_sections,
+ options::OPT_fdata_sections,
+ options::OPT_fno_data_sections,
+ options::OPT_funique_section_names,
+ options::OPT_fno_unique_section_names,
+ options::OPT_mrestrict_it,
+ options::OPT_mno_restrict_it,
+ options::OPT_mstackrealign,
+ options::OPT_mno_stackrealign,
+ options::OPT_mstack_alignment,
+ options::OPT_mcmodel_EQ,
+ options::OPT_mlong_calls,
+ options::OPT_mno_long_calls,
+ options::OPT_ggnu_pubnames,
+ options::OPT_gdwarf_aranges,
+ options::OPT_fdebug_types_section,
+ options::OPT_fno_debug_types_section,
+ options::OPT_fdwarf_directory_asm,
+ options::OPT_fno_dwarf_directory_asm,
+ options::OPT_mrelax_all,
+ options::OPT_mno_relax_all,
+ options::OPT_ftrap_function_EQ,
+ options::OPT_ffixed_r9,
+ options::OPT_mfix_cortex_a53_835769,
+ options::OPT_mno_fix_cortex_a53_835769,
+ options::OPT_ffixed_x18,
+ options::OPT_mglobal_merge,
+ options::OPT_mno_global_merge,
+ options::OPT_mred_zone,
+ options::OPT_mno_red_zone,
+ options::OPT_Wa_COMMA,
+ options::OPT_Xassembler,
+ options::OPT_mllvm,
+ };
+ for (const auto &A : Args)
+ if (std::find(std::begin(kBitcodeOptionBlacklist),
+ std::end(kBitcodeOptionBlacklist),
+ A->getOption().getID()) !=
+ std::end(kBitcodeOptionBlacklist))
+ D.Diag(diag::err_drv_unsupported_embed_bitcode) << A->getSpelling();
+
+ // Render the CodeGen options that need to be passed.
+ if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls))
+ CmdArgs.push_back("-mdisable-tail-calls");
+
+ RenderFloatingPointOptions(TC, D, isOptimizationLevelFast(Args), Args,
+ CmdArgs);
+
+ // Render ABI arguments
+ switch (TC.getArch()) {
+ default: break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ RenderARMABI(Triple, Args, CmdArgs);
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ RenderAArch64ABI(Triple, Args, CmdArgs);
+ break;
+ }
+
+ // Optimization level for CodeGen.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4)) {
+ CmdArgs.push_back("-O3");
+ D.Diag(diag::warn_O4_is_O3);
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ // Input/Output file.
+ if (Output.getType() == types::TY_Dependencies) {
+ // Handled with other dependency code.
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ for (const auto &II : Inputs) {
+ addDashXForInput(Args, II, CmdArgs);
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, D.getClangProgramPath(),
+ CmdArgs, Inputs));
+ return;
}
+
if (C.getDriver().embedBitcodeMarkerOnly() && !C.getDriver().isUsingLTO())
CmdArgs.push_back("-fembed-bitcode=marker");
@@ -3346,12 +3733,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-static-define");
+ if (Args.hasArg(options::OPT_municode))
+ CmdArgs.push_back("-DUNICODE");
+
if (isa<AnalyzeJobAction>(JA))
RenderAnalyzerOptions(Args, CmdArgs, Triple, Input);
+ // Enable compatibility mode to avoid analyzer-config related errors.
+ // Since we can't access frontend flags through hasArg, let's manually iterate
+ // through them.
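+ // Example of what this catches (illustrative): both
+ // "clang --analyze -Xclang -analyzer-config -Xclang key=val foo.c" and
+ // "clang --analyze -Xanalyzer -analyzer-config -Xanalyzer key=val foo.c"
+ // cause -analyzer-config-compatibility-mode=true to be added below.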
+ bool FoundAnalyzerConfig = false;
+ for (auto Arg : Args.filtered(options::OPT_Xclang))
+ if (StringRef(Arg->getValue()) == "-analyzer-config") {
+ FoundAnalyzerConfig = true;
+ break;
+ }
+ if (!FoundAnalyzerConfig)
+ for (auto Arg : Args.filtered(options::OPT_Xanalyzer))
+ if (StringRef(Arg->getValue()) == "-analyzer-config") {
+ FoundAnalyzerConfig = true;
+ break;
+ }
+ if (FoundAnalyzerConfig)
+ CmdArgs.push_back("-analyzer-config-compatibility-mode=true");
+
CheckCodeGenerationOptions(D, Args);
- unsigned FunctionAlignment = ParseFunctionAlignment(getToolChain(), Args);
+ unsigned FunctionAlignment = ParseFunctionAlignment(TC, Args);
assert(FunctionAlignment <= 31 && "function alignment will be truncated!");
if (FunctionAlignment) {
CmdArgs.push_back("-function-alignment");
@@ -3361,8 +3769,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
- std::tie(RelocationModel, PICLevel, IsPIE) =
- ParsePICArgs(getToolChain(), Args);
+ std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(TC, Args);
const char *RMName = RelocationModelName(RelocationModel);
@@ -3390,13 +3797,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mthread-model");
if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) {
- if (!getToolChain().isThreadModelSupported(A->getValue()))
+ if (!TC.isThreadModelSupported(A->getValue()))
D.Diag(diag::err_drv_invalid_thread_model_for_target)
<< A->getValue() << A->getAsString(Args);
CmdArgs.push_back(A->getValue());
}
else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().getThreadModel()));
+ CmdArgs.push_back(Args.MakeArgString(TC.getThreadModel()));
Args.AddLastArg(CmdArgs, options::OPT_fveclib);
@@ -3451,7 +3858,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fpcc_struct_return,
options::OPT_freg_struct_return)) {
- if (getToolChain().getArch() != llvm::Triple::x86) {
+ if (TC.getArch() != llvm::Triple::x86) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << RawTriple.str();
} else if (A->getOption().matches(options::OPT_fpcc_struct_return)) {
@@ -3516,18 +3923,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("-split-stacks");
- RenderFloatingPointOptions(getToolChain(), D, OFastEnabled, Args, CmdArgs);
+ RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs);
// Decide whether to use verbose asm. Verbose assembly is the default on
// toolchains which have the integrated assembler on by default.
- bool IsIntegratedAssemblerDefault =
- getToolChain().IsIntegratedAssemblerDefault();
+ bool IsIntegratedAssemblerDefault = TC.IsIntegratedAssemblerDefault();
if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
IsIntegratedAssemblerDefault) ||
Args.hasArg(options::OPT_dA))
CmdArgs.push_back("-masm-verbose");
- if (!getToolChain().useIntegratedAs())
+ if (!TC.useIntegratedAs())
CmdArgs.push_back("-no-integrated-as");
if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
@@ -3580,20 +3986,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool AsynchronousUnwindTables =
Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
options::OPT_fno_asynchronous_unwind_tables,
- (getToolChain().IsUnwindTablesDefault(Args) ||
- getToolChain().getSanitizerArgs().needsUnwindTables()) &&
+ (TC.IsUnwindTablesDefault(Args) ||
+ TC.getSanitizerArgs().needsUnwindTables()) &&
!Freestanding);
if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
AsynchronousUnwindTables))
CmdArgs.push_back("-munwind-tables");
- getToolChain().addClangTargetOptions(Args, CmdArgs,
- JA.getOffloadingDeviceKind());
-
- if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
- CmdArgs.push_back("-mlimit-float-precision");
- CmdArgs.push_back(A->getValue());
- }
+ TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
// FIXME: Handle -mtune=.
(void)Args.hasArg(options::OPT_mtune_EQ);
@@ -3620,22 +4020,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
types::ID InputType = Input.getType();
if (D.IsCLMode())
AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
- else
- EmitCodeView = Args.hasArg(options::OPT_gcodeview);
- const Arg *SplitDWARFArg = nullptr;
- RenderDebugOptions(getToolChain(), D, RawTriple, Args, EmitCodeView,
- IsWindowsMSVC, CmdArgs, DebugInfoKind, SplitDWARFArg);
+ DwarfFissionKind DwarfFission;
+ RenderDebugOptions(TC, D, RawTriple, Args, EmitCodeView, IsWindowsMSVC,
+ CmdArgs, DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
- bool SplitDWARF = SplitDWARFArg && RawTriple.isOSLinux() &&
+ bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
+ (RawTriple.isOSLinux() || RawTriple.isOSFuchsia()) &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
const char *SplitDWARFOut;
if (SplitDWARF) {
CmdArgs.push_back("-split-dwarf-file");
- SplitDWARFOut = SplitDebugName(Args, Input);
+ SplitDWARFOut = SplitDebugName(Args, Output);
CmdArgs.push_back(SplitDWARFOut);
}
@@ -3653,7 +4052,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_fallow_unsupported)) {
Arg *Unsupported;
if (types::isCXX(InputType) && RawTriple.isOSDarwin() &&
- getToolChain().getArch() == llvm::Triple::x86) {
+ TC.getArch() == llvm::Triple::x86) {
if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)) ||
(Unsupported = Args.getLastArg(options::OPT_mkernel)))
D.Diag(diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
@@ -3717,9 +4116,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
ABICompatArg->render(Args, CmdArgs);
// Add runtime flag for PS4 when PGO, coverage, or sanitizers are enabled.
- if (RawTriple.isPS4CPU()) {
- PS4cpu::addProfileRTArgs(getToolChain(), Args, CmdArgs);
- PS4cpu::addSanitizerArgs(getToolChain(), CmdArgs);
+ if (RawTriple.isPS4CPU() &&
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ PS4cpu::addProfileRTArgs(TC, Args, CmdArgs);
+ PS4cpu::addSanitizerArgs(TC, CmdArgs);
}
// Pass options for controlling the default header search paths.
@@ -3867,10 +4267,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-gnu-keywords");
}
- if (ShouldDisableDwarfDirectory(Args, getToolChain()))
+ if (ShouldDisableDwarfDirectory(Args, TC))
CmdArgs.push_back("-fno-dwarf-directory-asm");
- if (ShouldDisableAutolink(Args, getToolChain()))
+ if (ShouldDisableAutolink(Args, TC))
CmdArgs.push_back("-fno-autolink");
// Add in -fdebug-compilation-dir if necessary.
@@ -3916,6 +4316,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_relocatable_pch))
CmdArgs.push_back("-relocatable-pch");
+ if (const Arg *A = Args.getLastArg(options::OPT_fcf_runtime_abi_EQ)) {
+ static const char *kCFABIs[] = {
+ "standalone", "objc", "swift", "swift-5.0", "swift-4.2", "swift-4.1",
+ };
+
+ if (find(kCFABIs, StringRef(A->getValue())) == std::end(kCFABIs))
+ D.Diag(diag::err_drv_invalid_cf_runtime_abi) << A->getValue();
+ else
+ A->render(Args, CmdArgs);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
CmdArgs.push_back("-fconstant-string-class");
CmdArgs.push_back(A->getValue());
@@ -3983,6 +4394,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
@@ -3993,6 +4405,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
options::OPT_fno_emulated_tls);
+ Args.AddLastArg(CmdArgs, options::OPT_fkeep_static_consts);
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
@@ -4022,11 +4435,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_number_of_sm_EQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_blocks_per_sm_EQ);
+ if (Args.hasFlag(options::OPT_fopenmp_optimistic_collapse,
+ options::OPT_fno_openmp_optimistic_collapse,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-optimistic-collapse");
// When in OpenMP offloading mode with NVPTX target, forward
// cuda-mode flag
- Args.AddLastArg(CmdArgs, options::OPT_fopenmp_cuda_mode,
- options::OPT_fno_openmp_cuda_mode);
+ if (Args.hasFlag(options::OPT_fopenmp_cuda_mode,
+ options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-cuda-mode");
+
+ // When in OpenMP offloading mode with NVPTX target, check if full runtime
+ // is required.
+ if (Args.hasFlag(options::OPT_fopenmp_cuda_force_full_runtime,
+ options::OPT_fno_openmp_cuda_force_full_runtime,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-cuda-force-full-runtime");
break;
default:
// By default, if Clang doesn't know how to generate useful OpenMP code
@@ -4043,16 +4470,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
}
- const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs();
- Sanitize.addArgs(getToolChain(), Args, CmdArgs, InputType);
+ const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+ Sanitize.addArgs(TC, Args, CmdArgs, InputType);
- const XRayArgs &XRay = getToolChain().getXRayArgs();
- XRay.addArgs(getToolChain(), Args, CmdArgs, InputType);
+ const XRayArgs &XRay = TC.getXRayArgs();
+ XRay.addArgs(TC, Args, CmdArgs, InputType);
- if (getToolChain().SupportsProfiling())
+ if (TC.SupportsProfiling())
Args.AddLastArg(CmdArgs, options::OPT_pg);
- if (getToolChain().SupportsProfiling())
+ if (TC.SupportsProfiling())
Args.AddLastArg(CmdArgs, options::OPT_mfentry);
// -flax-vector-conversions is default.
@@ -4099,7 +4526,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_pthread);
- RenderSSPOptions(getToolChain(), Args, CmdArgs, KernelOrKext);
+ if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening, false))
+ CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
+
+ RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
+ RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
// Translate -mstackrealign
if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
@@ -4158,8 +4590,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
A->render(Args, CmdArgs);
}
+ Args.AddLastArg(CmdArgs, options::OPT_fprofile_remapping_file_EQ);
- RenderBuiltinOptions(getToolChain(), RawTriple, Args, CmdArgs);
+ RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
options::OPT_fno_assume_sane_operator_new))
@@ -4167,19 +4600,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fblocks=0 is default.
if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
- getToolChain().IsBlocksDefault()) ||
+ TC.IsBlocksDefault()) ||
(Args.hasArg(options::OPT_fgnu_runtime) &&
Args.hasArg(options::OPT_fobjc_nonfragile_abi) &&
!Args.hasArg(options::OPT_fno_blocks))) {
CmdArgs.push_back("-fblocks");
- if (!Args.hasArg(options::OPT_fgnu_runtime) &&
- !getToolChain().hasBlocksRuntime())
+ if (!Args.hasArg(options::OPT_fgnu_runtime) && !TC.hasBlocksRuntime())
CmdArgs.push_back("-fblocks-runtime-optional");
}
// -fencode-extended-block-signature=1 is default.
- if (getToolChain().IsEncodeExtendedBlockSignatureDefault())
+ if (TC.IsEncodeExtendedBlockSignatureDefault())
CmdArgs.push_back("-fencode-extended-block-signature");
if (Args.hasFlag(options::OPT_fcoroutines_ts, options::OPT_fno_coroutines_ts,
@@ -4204,7 +4636,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_felide_constructors, false))
CmdArgs.push_back("-fno-elide-constructors");
- ToolChain::RTTIMode RTTIMode = getToolChain().getRTTIMode();
+ ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
if (KernelOrKext || (types::isCXX(InputType) &&
(RTTIMode == ToolChain::RM_Disabled)))
@@ -4212,7 +4644,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fshort-enums=0 is default for all architectures except Hexagon.
if (Args.hasFlag(options::OPT_fshort_enums, options::OPT_fno_short_enums,
- getToolChain().getArch() == llvm::Triple::hexagon))
+ TC.getArch() == llvm::Triple::hexagon))
CmdArgs.push_back("-fshort-enums");
RenderCharacterOptions(Args, AuxTriple ? *AuxTriple : RawTriple, CmdArgs);
@@ -4222,8 +4654,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
!RawTriple.isOSWindows() &&
RawTriple.getOS() != llvm::Triple::Solaris &&
- getToolChain().getArch() != llvm::Triple::hexagon &&
- getToolChain().getArch() != llvm::Triple::xcore &&
+ TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
KernelOrKext)
@@ -4252,7 +4683,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_ms_extensions, true))))
CmdArgs.push_back("-fms-compatibility");
- VersionTuple MSVT = getToolChain().computeMSVCVersion(&D, Args);
+ VersionTuple MSVT = TC.computeMSVCVersion(&D, Args);
if (!MSVT.empty())
CmdArgs.push_back(
Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString()));
@@ -4330,8 +4761,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_experimental_new_pass_manager);
ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
- RenderObjCOptions(getToolChain(), D, RawTriple, Args, Runtime,
- rewriteKind != RK_None, Input, CmdArgs);
+ RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
+ Input, CmdArgs);
if (Args.hasFlag(options::OPT_fapplication_extension,
options::OPT_fno_application_extension, false))
@@ -4339,8 +4770,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Handle GCC-style exception args.
if (!C.getDriver().IsCLMode())
- addExceptionArgs(Args, InputType, getToolChain(), KernelOrKext, Runtime,
- CmdArgs);
+ addExceptionArgs(Args, InputType, TC, KernelOrKext, Runtime, CmdArgs);
// Handle exception personalities
Arg *A = Args.getLastArg(options::OPT_fsjlj_exceptions,
@@ -4355,7 +4785,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Opt.matches(options::OPT_fdwarf_exceptions))
CmdArgs.push_back("-fdwarf-exceptions");
} else {
- switch (getToolChain().GetExceptionModel(Args)) {
+ switch (TC.GetExceptionModel(Args)) {
default:
break;
case llvm::ExceptionHandling::DwarfCFI:
@@ -4592,7 +5022,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// nice to enable this when doing a crashdump for modules as well.
if (Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) ||
- (C.isForDiagnostics() && (RewriteImports || !HaveModules)))
+ (C.isForDiagnostics() && !HaveModules))
CmdArgs.push_back("-frewrite-includes");
// Only allow -traditional or -traditional-cpp outside in preprocessing modes.
@@ -4685,23 +5115,39 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addDashXForInput(Args, Input, CmdArgs);
- if (Input.isFilename())
- CmdArgs.push_back(Input.getFilename());
- else
- Input.getInputArg().renderAsInput(Args, CmdArgs);
+ ArrayRef<InputInfo> FrontendInputs = Input;
+ if (IsHeaderModulePrecompile)
+ FrontendInputs = ModuleHeaderInputs;
+ else if (Input.isNothing())
+ FrontendInputs = {};
+
+ for (const InputInfo &Input : FrontendInputs) {
+ if (Input.isFilename())
+ CmdArgs.push_back(Input.getFilename());
+ else
+ Input.getInputArg().renderAsInput(Args, CmdArgs);
+ }
Args.AddAllArgs(CmdArgs, options::OPT_undef);
const char *Exec = D.getClangProgramPath();
- // Optionally embed the -cc1 level arguments into the debug info, for build
- // analysis.
+ // Optionally embed the -cc1 level arguments into the debug info or a
+ // section, for build analysis.
// Also record the command line arguments into the debug info if
// -grecord-gcc-switches is set.
// By default, -gno-record-gcc-switches is in effect and nothing is recorded.
- if (getToolChain().UseDwarfDebugFlags() ||
- Args.hasFlag(options::OPT_grecord_gcc_switches,
- options::OPT_gno_record_gcc_switches, false)) {
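+ // Sketch of the resulting behavior (assumed from the flags handled below):
+ // -grecord-command-line (or a toolchain that sets UseDwarfDebugFlags) routes
+ // the escaped driver command line to cc1 via -dwarf-debug-flags, while
+ // -frecord-command-line routes it via -record-command-line and is rejected
+ // for non-ELF object formats.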
+ auto GRecordSwitches =
+ Args.hasFlag(options::OPT_grecord_command_line,
+ options::OPT_gno_record_command_line, false);
+ auto FRecordSwitches =
+ Args.hasFlag(options::OPT_frecord_command_line,
+ options::OPT_fno_record_command_line, false);
+ if (FRecordSwitches && !Triple.isOSBinFormatELF())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Args.getLastArg(options::OPT_frecord_command_line)->getAsString(Args)
+ << TripleStr;
+ if (TC.UseDwarfDebugFlags() || GRecordSwitches || FRecordSwitches) {
ArgStringList OriginalArgs;
for (const auto &Arg : Args)
Arg->render(Args, OriginalArgs);
@@ -4714,21 +5160,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Flags += " ";
Flags += EscapedArg;
}
- CmdArgs.push_back("-dwarf-debug-flags");
- CmdArgs.push_back(Args.MakeArgString(Flags));
+ auto FlagsArgString = Args.MakeArgString(Flags);
+ if (TC.UseDwarfDebugFlags() || GRecordSwitches) {
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(FlagsArgString);
+ }
+ if (FRecordSwitches) {
+ CmdArgs.push_back("-record-command-line");
+ CmdArgs.push_back(FlagsArgString);
+ }
}
- if (IsCuda) {
- // Host-side cuda compilation receives all device-side outputs in a single
- // fatbin as Inputs[1]. Include the binary with -fcuda-include-gpubinary.
- if (Inputs.size() > 1) {
- assert(Inputs.size() == 2 && "More than one GPU binary!");
+ // Host-side CUDA/HIP compilation receives all device-side outputs in a
+ // single fatbin. Include the binary with -fcuda-include-gpubinary.
+ if ((IsCuda || IsHIP) && CudaDeviceInput) {
CmdArgs.push_back("-fcuda-include-gpubinary");
- CmdArgs.push_back(Inputs[1].getFilename());
- }
+ CmdArgs.push_back(CudaDeviceInput->getFilename());
+ if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
+ CmdArgs.push_back("-fgpu-rdc");
+ }
- if (Args.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc, false))
- CmdArgs.push_back("-fcuda-rdc");
+ if (IsCuda) {
if (Args.hasFlag(options::OPT_fcuda_short_ptr,
options::OPT_fno_cuda_short_ptr, false))
CmdArgs.push_back("-fcuda-short-ptr");
@@ -4741,9 +5193,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// only the relevant declarations are emitted.
if (IsOpenMPDevice) {
CmdArgs.push_back("-fopenmp-is-device");
- if (Inputs.size() == 2) {
+ if (OpenMPDeviceInput) {
CmdArgs.push_back("-fopenmp-host-ir-file-path");
- CmdArgs.push_back(Args.MakeArgString(Inputs.back().getFilename()));
+ CmdArgs.push_back(Args.MakeArgString(OpenMPDeviceInput->getFilename()));
}
}
@@ -4777,6 +5229,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fwhole-program-vtables");
}
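+ // Illustrative consequence of the logic below: -fwhole-program-vtables or a
+ // sanitizer that needs LTO (e.g. -fsanitize=cfi) turns -fsplit-lto-unit on
+ // by default, and combining either of them with an explicit
+ // -fno-split-lto-unit is diagnosed as err_drv_argument_not_allowed_with.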
+ bool RequiresSplitLTOUnit = WholeProgramVTables || Sanitize.needsLTO();
+ bool SplitLTOUnit =
+ Args.hasFlag(options::OPT_fsplit_lto_unit,
+ options::OPT_fno_split_lto_unit, RequiresSplitLTOUnit);
+ if (RequiresSplitLTOUnit && !SplitLTOUnit)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fno-split-lto-unit"
+ << (WholeProgramVTables ? "-fwhole-program-vtables" : "-fsanitize=cfi");
+ if (SplitLTOUnit)
+ CmdArgs.push_back("-fsplit-lto-unit");
+
if (Arg *A = Args.getLastArg(options::OPT_fexperimental_isel,
options::OPT_fno_experimental_isel)) {
CmdArgs.push_back("-mllvm");
@@ -4818,6 +5281,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_complete_member_pointers, false))
CmdArgs.push_back("-fcomplete-member-pointers");
+ if (!Args.hasFlag(options::OPT_fcxx_static_destructors,
+ options::OPT_fno_cxx_static_destructors, true))
+ CmdArgs.push_back("-fno-c++-static-destructors");
+
if (Arg *A = Args.getLastArg(options::OPT_moutline,
options::OPT_mno_outline)) {
if (A->getOption().matches(options::OPT_moutline)) {
@@ -4838,8 +5305,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig,
- getToolChain().getTriple().isOSBinFormatELF() &&
- getToolChain().useIntegratedAs()))
+ (TC.getTriple().isOSBinFormatELF() ||
+ TC.getTriple().isOSBinFormatCOFF()) &&
+ !TC.getTriple().isPS4() &&
+ !TC.getTriple().isOSNetBSD() &&
+ !Distro(D.getVFS()).IsGentoo() &&
+ !TC.getTriple().isAndroid() &&
+ TC.useIntegratedAs()))
CmdArgs.push_back("-faddrsig");
// Finally add the compile command to the compilation.
@@ -4860,16 +5332,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
+ // Make the compile command echo its inputs for /showFilenames.
+ if (Output.getType() == types::TY_Object &&
+ Args.hasFlag(options::OPT__SLASH_showFilenames,
+ options::OPT__SLASH_showFilenames_, false)) {
+ C.getJobs().getJobs().back()->setPrintInputFilenames(true);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_pg))
- if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ if (!shouldUseFramePointer(Args, Triple))
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fomit-frame-pointer"
<< A->getAsString(Args);
// Claim some arguments which clang supports automatically.
// -fpch-preprocess is used with gcc to add a special marker in the output to
- // include the PCH file. Clang's PTH solution is completely transparent, so we
- // do not need to deal with it at all.
+ // include the PCH file.
Args.ClaimAllArgs(options::OPT_fpch_preprocess);
// Claim some arguments which clang doesn't support, but we don't
@@ -4912,7 +5390,8 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
}
if ((runtime.getKind() == ObjCRuntime::GNUstep) &&
(runtime.getVersion() >= VersionTuple(2, 0)))
- if (!getToolChain().getTriple().isOSBinFormatELF()) {
+ if (!getToolChain().getTriple().isOSBinFormatELF() &&
+ !getToolChain().getTriple().isOSBinFormatCOFF()) {
getToolChain().getDriver().Diag(
diag::err_drv_gnustep_objc_runtime_incompatible_binary)
<< runtime.getVersion().getMajor();
@@ -5163,7 +5642,6 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
*DebugInfoKind = codegenoptions::LimitedDebugInfo;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
- CmdArgs.push_back("-gcodeview");
} else {
*EmitCodeView = false;
}
@@ -5197,6 +5675,16 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
if (VolatileOptionID == options::OPT__SLASH_volatile_ms)
CmdArgs.push_back("-fms-volatile");
+ if (Args.hasFlag(options::OPT__SLASH_Zc_dllexportInlines_,
+ options::OPT__SLASH_Zc_dllexportInlines,
+ false)) {
+ if (Args.hasArg(options::OPT__SLASH_fallback)) {
+ D.Diag(clang::diag::err_drv_dllexport_inlines_and_fallback);
+ } else {
+ CmdArgs.push_back("-fno-dllexport-inlines");
+ }
+ }
+
Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
if (MostGeneralArg && BestCaseArg)
@@ -5270,9 +5758,28 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("msvc");
}
- if (Args.hasArg(options::OPT__SLASH_Guard) &&
- Args.getLastArgValue(options::OPT__SLASH_Guard).equals_lower("cf"))
- CmdArgs.push_back("-cfguard");
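+ // Example mappings implemented below (a hedged summary, not an exhaustive
+ // spec): "/guard:cf" and "/guard:cf,nochecks" both add -cfguard,
+ // "/guard:cf-" adds nothing, and an unknown token such as "/guard:xyz" is
+ // diagnosed with err_drv_invalid_value.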
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
+ SmallVector<StringRef, 1> SplitArgs;
+ StringRef(A->getValue()).split(SplitArgs, ",");
+ bool Instrument = false;
+ bool NoChecks = false;
+ for (StringRef Arg : SplitArgs) {
+ if (Arg.equals_lower("cf"))
+ Instrument = true;
+ else if (Arg.equals_lower("cf-"))
+ Instrument = false;
+ else if (Arg.equals_lower("nochecks"))
+ NoChecks = true;
+ else if (Arg.equals_lower("nochecks-"))
+ NoChecks = false;
+ else
+ D.Diag(diag::err_drv_invalid_value) << A->getSpelling() << Arg;
+ }
+ // Currently there's no support for emitting CFG instrumentation; the flag
+ // only emits the table of address-taken functions.
+ if (Instrument || NoChecks)
+ CmdArgs.push_back("-cfguard");
+ }
}
visualstudio::Compiler *Clang::getCLFallback() const {
@@ -5525,10 +6032,12 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (Args.hasArg(options::OPT_gsplit_dwarf) &&
- getToolChain().getTriple().isOSLinux()) {
+ const llvm::Triple &T = getToolChain().getTriple();
+ Arg *A;
+ if ((getDebugFissionKind(D, Args, A) == DwarfFissionKind::Split) &&
+ (T.isOSLinux() || T.isOSFuchsia())) {
CmdArgs.push_back("-split-dwarf-file");
- CmdArgs.push_back(SplitDebugName(Args, Input));
+ CmdArgs.push_back(SplitDebugName(Args, Output));
}
assert(Input.isFilename() && "Invalid input.");
diff --git a/lib/Driver/ToolChains/CommonArgs.cpp b/lib/Driver/ToolChains/CommonArgs.cpp
index 1e093b25b909..d7e316befa61 100644
--- a/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/lib/Driver/ToolChains/CommonArgs.cpp
@@ -14,13 +14,13 @@
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
#include "Arch/X86.h"
+#include "HIP.h"
#include "Hexagon.h"
#include "InputInfo.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
@@ -51,6 +51,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
using namespace clang::driver;
@@ -219,21 +220,6 @@ static std::string getR600TargetGPU(const ArgList &Args) {
return "";
}
-static std::string getNios2TargetCPU(const ArgList &Args) {
- Arg *A = Args.getLastArg(options::OPT_mcpu_EQ);
- if (!A)
- A = Args.getLastArg(options::OPT_march_EQ);
-
- if (!A)
- return "";
-
- const char *name = A->getValue();
- return llvm::StringSwitch<const char *>(name)
- .Case("r1", "nios2r1")
- .Case("r2", "nios2r2")
- .Default(name);
-}
-
static std::string getLanaiTargetCPU(const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
return A->getValue();
@@ -270,7 +256,7 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- return aarch64::getAArch64TargetCPU(Args, A);
+ return aarch64::getAArch64TargetCPU(Args, T, A);
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -286,10 +272,6 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
return A->getValue();
return "";
- case llvm::Triple::nios2: {
- return getNios2TargetCPU(Args);
- }
-
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -559,6 +541,40 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
return false;
}
+static void addSanitizerLibPath(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef Name) {
+ for (const auto &LibPath : TC.getLibraryPaths()) {
+ if (!LibPath.empty()) {
+ SmallString<128> P(LibPath);
+ llvm::sys::path::append(P, Name);
+ if (TC.getVFS().exists(P))
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + P));
+ }
+ }
+}
+
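+// A sketch of the effect (assuming a toolchain library path of <libpath>):
+// with -fsanitize=address this adds "-L<libpath>/asan" when that directory
+// exists, and likewise "hwasan", "lsan", "msan" or "tsan" for the
+// corresponding sanitizers.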
+void tools::addSanitizerPathLibArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.needsAsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "asan");
+ }
+ if (SanArgs.needsHwasanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "hwasan");
+ }
+ if (SanArgs.needsLsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "lsan");
+ }
+ if (SanArgs.needsMsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "msan");
+ }
+ if (SanArgs.needsTsanRt()) {
+ addSanitizerLibPath(TC, Args, CmdArgs, "tsan");
+ }
+}
+
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
// Force linking against the system libraries sanitizers depends on
@@ -568,19 +584,19 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
if (TC.getTriple().getOS() != llvm::Triple::RTEMS &&
!TC.getTriple().isAndroid()) {
CmdArgs.push_back("-lpthread");
- if (TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
}
CmdArgs.push_back("-lm");
// There's no libdl on all OSes.
- if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
- TC.getTriple().getOS() != llvm::Triple::NetBSD &&
- TC.getTriple().getOS() != llvm::Triple::OpenBSD &&
- TC.getTriple().getOS() != llvm::Triple::RTEMS)
+ if (!TC.getTriple().isOSFreeBSD() &&
+ !TC.getTriple().isOSNetBSD() &&
+ !TC.getTriple().isOSOpenBSD() &&
+ TC.getTriple().getOS() != llvm::Triple::RTEMS)
CmdArgs.push_back("-ldl");
// Required for backtrace on some OSes
- if (TC.getTriple().getOS() == llvm::Triple::NetBSD ||
- TC.getTriple().getOS() == llvm::Triple::FreeBSD)
+ if (TC.getTriple().isOSFreeBSD() ||
+ TC.getTriple().isOSNetBSD())
CmdArgs.push_back("-lexecinfo");
}
@@ -755,13 +771,13 @@ bool tools::addXRayRuntime(const ToolChain&TC, const ArgList &Args, ArgStringLis
void tools::linkXRayRuntimeDeps(const ToolChain &TC, ArgStringList &CmdArgs) {
CmdArgs.push_back("--no-as-needed");
CmdArgs.push_back("-lpthread");
- if (TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
CmdArgs.push_back("-lm");
- if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
- TC.getTriple().getOS() != llvm::Triple::NetBSD &&
- TC.getTriple().getOS() != llvm::Triple::OpenBSD)
+ if (!TC.getTriple().isOSFreeBSD() &&
+ !TC.getTriple().isOSNetBSD() &&
+ !TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-ldl");
}
@@ -773,21 +789,18 @@ bool tools::areOptimizationsEnabled(const ArgList &Args) {
return false;
}
-const char *tools::SplitDebugName(const ArgList &Args, const InputInfo &Input) {
- Arg *FinalOutput = Args.getLastArg(options::OPT_o);
- if (FinalOutput && Args.hasArg(options::OPT_c)) {
- SmallString<128> T(FinalOutput->getValue());
- llvm::sys::path::replace_extension(T, "dwo");
- return Args.MakeArgString(T);
- } else {
- // Use the compilation dir.
- SmallString<128> T(
- Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
- SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
- llvm::sys::path::replace_extension(F, "dwo");
- T += F;
- return Args.MakeArgString(F);
- }
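+// Rough sketch of the naming scheme below (example file names are
+// illustrative): compiling with "-c -gsplit-dwarf foo.c -o foo.o" yields
+// "foo.dwo" as the split-debug output name, while "-gsplit-dwarf=single"
+// returns the object file name "foo.o" unchanged so the debug info stays in
+// the object itself.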
+const char *tools::SplitDebugName(const ArgList &Args,
+ const InputInfo &Output) {
+ SmallString<128> F(Output.isFilename()
+ ? Output.getFilename()
+ : llvm::sys::path::stem(Output.getBaseInput()));
+
+ if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
+ if (StringRef(A->getValue()) == "single")
+ return Args.MakeArgString(F);
+
+ llvm::sys::path::replace_extension(F, "dwo");
+ return Args.MakeArgString(F);
}
void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -901,7 +914,7 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
// OpenBSD-specific defaults for PIE
- if (Triple.getOS() == llvm::Triple::OpenBSD) {
+ if (Triple.isOSOpenBSD()) {
switch (ToolChain.getArch()) {
case llvm::Triple::arm:
case llvm::Triple::aarch64:
@@ -1126,23 +1139,39 @@ static void AddLibgcc(const llvm::Triple &Triple, const Driver &D,
bool IsIAMCU = Triple.isOSIAMCU();
bool StaticLibgcc = Args.hasArg(options::OPT_static_libgcc) ||
Args.hasArg(options::OPT_static);
- if (!D.CCCIsCXX())
+
+ bool SharedLibgcc = Args.hasArg(options::OPT_shared_libgcc);
+ bool UnspecifiedLibgcc = !StaticLibgcc && !SharedLibgcc;
+
+ // GCC adds libgcc arguments in various ways:
+ //
+ // gcc <none>: -lgcc --as-needed -lgcc_s --no-as-needed
+ // g++ <none>: -lgcc_s -lgcc
+ // gcc shared: -lgcc_s -lgcc
+ // g++ shared: -lgcc_s -lgcc
+ // gcc static: -lgcc -lgcc_eh
+ // g++ static: -lgcc -lgcc_eh
+ //
+ // Also, certain targets need additional adjustments.
+
+ bool LibGccFirst = (D.CCCIsCC() && UnspecifiedLibgcc) || StaticLibgcc;
+ if (LibGccFirst)
CmdArgs.push_back("-lgcc");
- if (StaticLibgcc || isAndroid) {
- if (D.CCCIsCXX())
- CmdArgs.push_back("-lgcc");
- } else {
- if (!D.CCCIsCXX() && !isCygMing)
- CmdArgs.push_back("--as-needed");
+ bool AsNeeded = D.CCCIsCC() && UnspecifiedLibgcc && !isAndroid && !isCygMing;
+ if (AsNeeded)
+ CmdArgs.push_back("--as-needed");
+
+ if ((UnspecifiedLibgcc || SharedLibgcc) && !isAndroid)
CmdArgs.push_back("-lgcc_s");
- if (!D.CCCIsCXX() && !isCygMing)
- CmdArgs.push_back("--no-as-needed");
- }
- if (StaticLibgcc && !isAndroid && !IsIAMCU)
+ else if (StaticLibgcc && !isAndroid && !IsIAMCU)
CmdArgs.push_back("-lgcc_eh");
- else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX())
+
+ if (AsNeeded)
+ CmdArgs.push_back("--no-as-needed");
+
+ if (!LibGccFirst)
CmdArgs.push_back("-lgcc");
// According to Android ABI, we have to link with libdl if we are
@@ -1318,6 +1347,18 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
if (!JA.isHostOffloading(Action::OFK_HIP))
return;
+ InputInfoList DeviceInputs;
+ for (const auto &II : Inputs) {
+ const Action *A = II.getAction();
+ // Is this a device linking action?
+ if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
+ DeviceInputs.push_back(II);
+ }
+ }
+
+ if (DeviceInputs.empty())
+ return;
+
// Create temporary linker script. Keep it if save-temps is enabled.
const char *LKS;
SmallString<256> Name = llvm::sys::path::filename(Output.getFilename());
@@ -1345,39 +1386,12 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
"Wrong platform");
(void)HIPTC;
- // Construct clang-offload-bundler command to bundle object files for
- // for different GPU archs.
- ArgStringList BundlerArgs;
- BundlerArgs.push_back(Args.MakeArgString("-type=o"));
-
- // ToDo: Remove the dummy host binary entry which is required by
- // clang-offload-bundler.
- std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
- std::string BundlerInputArg = "-inputs=/dev/null";
-
- for (const auto &II : Inputs) {
- const Action *A = II.getAction();
- // Is this a device linking action?
- if (A && isa<LinkJobAction>(A) && A->isDeviceOffloading(Action::OFK_HIP)) {
- BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
- StringRef(A->getOffloadingArch()).str();
- BundlerInputArg = BundlerInputArg + "," + II.getFilename();
- }
- }
- BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
- BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
-
- std::string BundleFileName = C.getDriver().GetTemporaryPath("BUNDLE", "o");
+ // The output file name needs to persist through the compilation, so it
+ // needs to be created through MakeArgString.
+ std::string BundleFileName =
+ C.getDriver().GetTemporaryPath("BUNDLE", "hipfb");
const char *BundleFile =
C.addTempFile(C.getArgs().MakeArgString(BundleFileName.c_str()));
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(BundleFile));
- BundlerArgs.push_back(BundlerOutputArg);
-
- SmallString<128> BundlerPath(C.getDriver().Dir);
- llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
- const char *Bundler = Args.MakeArgString(BundlerPath);
- C.addCommand(llvm::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+ AMDGCN::constructHIPFatbinCommand(C, JA, BundleFile, DeviceInputs, Args, T);
// Add commands to embed target binaries. We ensure that each section and
// image is 16-byte aligned. This is not mandatory, but increases the
@@ -1397,6 +1411,10 @@ void tools::AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
LksStream << " PROVIDE_HIDDEN(__hip_fatbin = .);\n";
LksStream << " " << BundleFileName << "\n";
LksStream << " }\n";
+ LksStream << " /DISCARD/ :\n";
+ LksStream << " {\n";
+ LksStream << " * ( __CLANG_OFFLOAD_BUNDLE__* )\n";
+ LksStream << " }\n";
LksStream << "}\n";
LksStream << "INSERT BEFORE .data\n";
LksStream.flush();
diff --git a/lib/Driver/ToolChains/CommonArgs.h b/lib/Driver/ToolChains/CommonArgs.h
index e8ebe2225e1c..3704b2e01b54 100644
--- a/lib/Driver/ToolChains/CommonArgs.h
+++ b/lib/Driver/ToolChains/CommonArgs.h
@@ -32,6 +32,10 @@ void claimNoWarnArgs(const llvm::opt::ArgList &Args);
bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
+void addSanitizerPathLibArgs(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
void linkSanitizerRuntimeDeps(const ToolChain &TC,
llvm::opt::ArgStringList &CmdArgs);
@@ -59,7 +63,7 @@ void AddHIPLinkerScript(const ToolChain &TC, Compilation &C,
const Tool &T);
const char *SplitDebugName(const llvm::opt::ArgList &Args,
- const InputInfo &Input);
+ const InputInfo &Output);
void SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
const JobAction &JA, const llvm::opt::ArgList &Args,
diff --git a/lib/Driver/ToolChains/CrossWindows.cpp b/lib/Driver/ToolChains/CrossWindows.cpp
index 6ca04a8a3abb..795356026fbe 100644
--- a/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/lib/Driver/ToolChains/CrossWindows.cpp
@@ -20,6 +20,7 @@ using namespace clang::driver;
using namespace clang::driver::toolchains;
using llvm::opt::ArgList;
+using llvm::opt::ArgStringList;
void tools::CrossWindows::Assembler::ConstructJob(
Compilation &C, const JobAction &JA, const InputInfo &Output,
diff --git a/lib/Driver/ToolChains/Cuda.cpp b/lib/Driver/ToolChains/Cuda.cpp
index 7fb4ae4ea9cf..57b8d4340e3b 100644
--- a/lib/Driver/ToolChains/Cuda.cpp
+++ b/lib/Driver/ToolChains/Cuda.cpp
@@ -11,7 +11,6 @@
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Distro.h"
@@ -23,6 +22,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -59,6 +59,8 @@ static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
return CudaVersion::CUDA_91;
if (Major == 9 && Minor == 2)
return CudaVersion::CUDA_92;
+ if (Major == 10 && Minor == 0)
+ return CudaVersion::CUDA_100;
return CudaVersion::UNKNOWN;
}
@@ -112,7 +114,7 @@ CudaInstallationDetector::CudaInstallationDetector(
for (const char *Ver : Versions)
Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);
- if (Distro(D.getVFS()).IsDebian())
+ if (Distro(D.getVFS()).IsDebian() || Distro(D.getVFS()).IsUbuntu())
// Special case for Debian to have nvidia-cuda-toolkit work
// out of the box. More info on http://bugs.debian.org/882505
Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
@@ -165,7 +167,7 @@ CudaInstallationDetector::CudaInstallationDetector(
if (FS.exists(FilePath)) {
for (const char *GpuArchName :
{"sm_30", "sm_32", "sm_35", "sm_37", "sm_50", "sm_52", "sm_53",
- "sm_60", "sm_61", "sm_62", "sm_70", "sm_72"}) {
+ "sm_60", "sm_61", "sm_62", "sm_70", "sm_72", "sm_75"}) {
const CudaArch GpuArch = StringToCudaArch(GpuArchName);
if (Version >= MinVersionForCudaArch(GpuArch) &&
Version <= MaxVersionForCudaArch(GpuArch))
@@ -276,32 +278,44 @@ void CudaInstallationDetector::print(raw_ostream &OS) const {
}
namespace {
- /// Debug info kind.
-enum DebugInfoKind {
- NoDebug, /// No debug info.
- LineTableOnly, /// Line tables only.
- FullDebug /// Full debug info.
+/// Debug info level for the NVPTX devices. We may need to emit a different
+/// debug info level for the host and for the device itself. This type controls
+/// emission of the debug info for the devices. It either disables debug info
+/// emission completely, emits debug directives only, or emits the same debug
+/// info as for the host.
+enum DeviceDebugInfoLevel {
+ DisableDebugInfo, /// Do not emit debug info for the devices.
+ DebugDirectivesOnly, /// Emit only debug directives.
+ EmitSameDebugInfoAsHost, /// Use the same debug info level as for the
+ /// host.
};
} // anonymous namespace
-static DebugInfoKind mustEmitDebugInfo(const ArgList &Args) {
- Arg *A = Args.getLastArg(options::OPT_O_Group);
- if (Args.hasFlag(options::OPT_cuda_noopt_device_debug,
- options::OPT_no_cuda_noopt_device_debug,
- !A || A->getOption().matches(options::OPT_O0))) {
- if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- const Option &Opt = A->getOption();
- if (Opt.matches(options::OPT_gN_Group)) {
- if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
- return NoDebug;
- if (Opt.matches(options::OPT_gline_tables_only) ||
- Opt.matches(options::OPT_ggdb1))
- return LineTableOnly;
- }
- return FullDebug;
+/// Determine the debug info level for the NVPTX devices. If the debug info for
+/// both the host and the device is disabled (-g0/-ggdb0 or no debug options at
+/// all), no debug info is emitted for the device. If only debug directives are
+/// requested for both the host and the device (-gline-directives-only), or if
+/// the debug info is disabled only for the device (optimization is on and
+/// --cuda-noopt-device-debug was not specified), only debug directives are
+/// emitted for the device. Otherwise, use the same debug info level as for the
+/// host (with the limitation that only the DWARF2 standard is supported).
+static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_O_Group);
+ bool IsDebugEnabled = !A || A->getOption().matches(options::OPT_O0) ||
+ Args.hasFlag(options::OPT_cuda_noopt_device_debug,
+ options::OPT_no_cuda_noopt_device_debug,
+ /*Default=*/false);
+ if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ const Option &Opt = A->getOption();
+ if (Opt.matches(options::OPT_gN_Group)) {
+ if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
+ return DisableDebugInfo;
+ if (Opt.matches(options::OPT_gline_directives_only))
+ return DebugDirectivesOnly;
}
+ return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
}
- return NoDebug;
+ return DisableDebugInfo;
}
void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
@@ -335,8 +349,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
- DebugInfoKind DIKind = mustEmitDebugInfo(Args);
- if (DIKind == FullDebug) {
+ DeviceDebugInfoLevel DIKind = mustEmitDebugInfo(Args);
+ if (DIKind == EmitSameDebugInfoAsHost) {
// ptxas does not accept -g option if optimization is enabled, so
// we ignore the compiler's -O* options if we want debug info.
CmdArgs.push_back("-g");
@@ -372,7 +386,7 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// to no optimizations, but ptxas's default is -O3.
CmdArgs.push_back("-O0");
}
- if (DIKind == LineTableOnly)
+ if (DIKind == DebugDirectivesOnly)
CmdArgs.push_back("-lineinfo");
// Pass -v to ptxas if it was passed to the driver.
@@ -396,8 +410,8 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fnoopenmp_relocatable_target,
/*Default=*/true);
else if (JA.isOffloading(Action::OFK_Cuda))
- Relocatable = Args.hasFlag(options::OPT_fcuda_rdc,
- options::OPT_fno_cuda_rdc, /*Default=*/false);
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
if (Relocatable)
CmdArgs.push_back("-c");
@@ -443,7 +457,7 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
CmdArgs.push_back(Args.MakeArgString("--create"));
CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
- if (mustEmitDebugInfo(Args) == FullDebug)
+ if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
for (const auto& II : Inputs) {
@@ -496,7 +510,7 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Output.getFilename());
} else
assert(Output.isNothing() && "Invalid output.");
- if (mustEmitDebugInfo(Args) == FullDebug)
+ if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
if (Args.hasArg(options::OPT_v))
@@ -509,6 +523,11 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-arch");
CmdArgs.push_back(Args.MakeArgString(GPUArch));
+ // Assume that the directory specified with --libomptarget_nvptx_path
+ // contains the static library libomptarget-nvptx.a.
+ if (const Arg *A = Args.getLastArg(options::OPT_libomptarget_nvptx_path_EQ))
+ CmdArgs.push_back(Args.MakeArgString(Twine("-L") + A->getValue()));
+
// Add paths specified in LIBRARY_PATH environment variable as -L options.
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
@@ -602,9 +621,9 @@ void CudaToolChain::addClangTargetOptions(
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
- if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
- CC1Args.push_back("-fcuda-rdc");
+ CC1Args.push_back("-fgpu-rdc");
}
if (DriverArgs.hasArg(options::OPT_nocudalib))
@@ -621,13 +640,16 @@ void CudaToolChain::addClangTargetOptions(
return;
}
- CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
// Libdevice in CUDA-7.0 requires PTX version that's more recent than LLVM
// defaults to. Use PTX4.2 by default, which is the PTX version that came with
// CUDA-7.0.
const char *PtxFeature = "+ptx42";
+ // TODO(tra): CUDA-10+ needs PTX 6.3 to support new features. However, that
+ // requires a fair amount of work on the LLVM side. We'll keep using PTX 6.1
+ // until all prerequisites are in place.
if (CudaInstallation.version() >= CudaVersion::CUDA_91) {
// CUDA-9.1 uses new instructions that are only available in PTX6.1+
PtxFeature = "+ptx61";
@@ -642,12 +664,9 @@ void CudaToolChain::addClangTargetOptions(
if (DeviceOffloadingKind == Action::OFK_OpenMP) {
SmallVector<StringRef, 8> LibraryPaths;
- // Add path to lib and/or lib64 folders.
- SmallString<256> DefaultLibPath =
- llvm::sys::path::parent_path(getDriver().Dir);
- llvm::sys::path::append(DefaultLibPath,
- Twine("lib") + CLANG_LIBDIR_SUFFIX);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
+ if (const Arg *A = DriverArgs.getLastArg(options::OPT_libomptarget_nvptx_path_EQ))
+ LibraryPaths.push_back(A->getValue());
// Add user defined library paths from LIBRARY_PATH.
llvm::Optional<std::string> LibPath =
@@ -660,6 +679,12 @@ void CudaToolChain::addClangTargetOptions(
LibraryPaths.emplace_back(Path.trim());
}
+ // Add path to lib / lib64 folder.
+ SmallString<256> DefaultLibPath =
+ llvm::sys::path::parent_path(getDriver().Dir);
+ llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
+ LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
std::string LibOmpTargetName =
"libomptarget-nvptx-" + GpuArch.str() + ".bc";
bool FoundBCLibrary = false;
@@ -667,7 +692,7 @@ void CudaToolChain::addClangTargetOptions(
SmallString<128> LibOmpTargetFile(LibraryPath);
llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
if (llvm::sys::fs::exists(LibOmpTargetFile)) {
- CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
FoundBCLibrary = true;
break;
@@ -691,6 +716,21 @@ bool CudaToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
O.matches(options::OPT_gcolumn_info);
}
+void CudaToolChain::adjustDebugInfoKind(
+ codegenoptions::DebugInfoKind &DebugInfoKind, const ArgList &Args) const {
+ switch (mustEmitDebugInfo(Args)) {
+ case DisableDebugInfo:
+ DebugInfoKind = codegenoptions::NoDebugInfo;
+ break;
+ case DebugDirectivesOnly:
+ DebugInfoKind = codegenoptions::DebugDirectivesOnly;
+ break;
+ case EmitSameDebugInfoAsHost:
+ // Use the same debug info level as the host.
+ break;
+ }
+}
+
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Check our CUDA version if we're going to include the CUDA headers.
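To summarize the mapping implemented by mustEmitDebugInfo above, here is a small standalone sketch (illustrative only, not driver code; the helper name pickDeviceDebugLevel and its boolean parameters are hypothetical condensations of the option checks made in the patch):

#include <cstdio>

// Mirrors the DeviceDebugInfoLevel enum added in this patch.
enum DeviceDebugInfoLevel {
  DisableDebugInfo,
  DebugDirectivesOnly,
  EmitSameDebugInfoAsHost
};

// Hypothetical condensation of mustEmitDebugInfo(): HasAnyGOption is true when
// some -g* flag was given, HasG0 covers -g0/-ggdb0, HasLineDirectivesOnly
// covers -gline-directives-only, and DeviceDebugEnabled is true when no -O
// level above -O0 is active or --cuda-noopt-device-debug was passed.
static DeviceDebugInfoLevel pickDeviceDebugLevel(bool HasAnyGOption, bool HasG0,
                                                 bool HasLineDirectivesOnly,
                                                 bool DeviceDebugEnabled) {
  if (!HasAnyGOption || HasG0)
    return DisableDebugInfo;
  if (HasLineDirectivesOnly)
    return DebugDirectivesOnly;
  return DeviceDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
}

int main() {
  // -O2 -g without --cuda-noopt-device-debug: directives only for the device.
  std::printf("%d\n", pickDeviceDebugLevel(true, false, false, false));
  // -O0 -g: the device gets the same debug info level as the host.
  std::printf("%d\n", pickDeviceDebugLevel(true, false, false, true));
  // No -g option at all: no device debug info.
  std::printf("%d\n", pickDeviceDebugLevel(false, false, false, true));
  return 0;
}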
diff --git a/lib/Driver/ToolChains/Cuda.h b/lib/Driver/ToolChains/Cuda.h
index 01580cb66920..1d63ede41155 100644
--- a/lib/Driver/ToolChains/Cuda.h
+++ b/lib/Driver/ToolChains/Cuda.h
@@ -159,6 +159,8 @@ public:
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
bool supportsDebugInfoOption(const llvm::opt::Arg *A) const override;
+ void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const override;
bool IsMathErrnoDefault() const override { return false; }
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
diff --git a/lib/Driver/ToolChains/Darwin.cpp b/lib/Driver/ToolChains/Darwin.cpp
index 9205dd52de0b..c395c9a4430e 100644
--- a/lib/Driver/ToolChains/Darwin.cpp
+++ b/lib/Driver/ToolChains/Darwin.cpp
@@ -12,7 +12,6 @@
#include "CommonArgs.h"
#include "clang/Basic/AlignedAllocation.h"
#include "clang/Basic/ObjCRuntime.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -23,6 +22,7 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <cstdlib> // ::getenv
using namespace clang::driver;
@@ -98,7 +98,7 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
SourceAction = SourceAction->getInputs()[0];
}
- // If -fno-integrated-as is used add -Q to the darwin assember driver to make
+ // If -fno-integrated-as is used add -Q to the darwin assembler driver to make
// sure it runs its system assembler not clang's integrated assembler.
// Applicable to darwin11+ and Xcode 4+. darwin<10 lacked integrated-as.
// FIXME: at run-time detect assembler capabilities or rely on version
@@ -224,13 +224,20 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-application_extension");
- if (D.isUsingLTO()) {
- // If we are using LTO, then automatically create a temporary file path for
- // the linker to use, so that it's lifetime will extend past a possible
- // dsymutil step.
- if (Version[0] >= 116 && NeedsTempPath(Inputs)) {
- const char *TmpPath = C.getArgs().MakeArgString(
- D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
+ if (D.isUsingLTO() && Version[0] >= 116 && NeedsTempPath(Inputs)) {
+ std::string TmpPathName;
+ if (D.getLTOMode() == LTOK_Full) {
+ // If we are using full LTO, then automatically create a temporary file
+ // path for the linker to use, so that its lifetime will extend past a
+ // possible dsymutil step.
+ TmpPathName =
+ D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object));
+ } else if (D.getLTOMode() == LTOK_Thin)
+ // If we are using thin LTO, then create a directory instead.
+ TmpPathName = D.GetTemporaryDirectory("thinlto");
+
+ if (!TmpPathName.empty()) {
+ auto *TmpPath = C.getArgs().MakeArgString(TmpPathName);
C.addTempFile(TmpPath);
CmdArgs.push_back("-object_path_lto");
CmdArgs.push_back(TmpPath);
@@ -509,15 +516,6 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
getMachOToolChain().addStartObjectFileArgs(Args, CmdArgs);
- // SafeStack requires its own runtime libraries
- // These libraries should be linked first, to make sure the
- // __safestack_init constructor executes before everything else
- if (getToolChain().getSanitizerArgs().needsSafeStackRt()) {
- getMachOToolChain().AddLinkRuntimeLib(Args, CmdArgs,
- "libclang_rt.safestack_osx.a",
- toolchains::Darwin::RLO_AlwaysLink);
- }
-
Args.AddAllArgs(CmdArgs, options::OPT_L);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
@@ -917,8 +915,19 @@ unsigned DarwinClang::GetDefaultDwarfVersion() const {
}
void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
- StringRef DarwinLibName,
- RuntimeLinkOptions Opts) const {
+ StringRef Component, RuntimeLinkOptions Opts,
+ bool IsShared) const {
+ SmallString<64> DarwinLibName = StringRef("libclang_rt.");
+ // On Darwin the builtins component is not in the library name.
+ if (Component != "builtins") {
+ DarwinLibName += Component;
+ if (!(Opts & RLO_IsEmbedded))
+ DarwinLibName += "_";
+ DarwinLibName += getOSLibraryNameSuffix();
+ } else
+ DarwinLibName += getOSLibraryNameSuffix(true);
+
+ DarwinLibName += IsShared ? "_dynamic.dylib" : ".a";
SmallString<128> Dir(getDriver().ResourceDir);
llvm::sys::path::append(
Dir, "lib", (Opts & RLO_IsEmbedded) ? "macho_embedded" : "darwin");
@@ -983,16 +992,19 @@ StringRef Darwin::getSDKName(StringRef isysroot) {
return "";
}
-StringRef Darwin::getOSLibraryNameSuffix() const {
- switch(TargetPlatform) {
+StringRef Darwin::getOSLibraryNameSuffix(bool IgnoreSim) const {
+ switch (TargetPlatform) {
case DarwinPlatformKind::MacOS:
return "osx";
case DarwinPlatformKind::IPhoneOS:
- return TargetEnvironment == NativeEnvironment ? "ios" : "iossim";
+ return TargetEnvironment == NativeEnvironment || IgnoreSim ? "ios"
+ : "iossim";
case DarwinPlatformKind::TvOS:
- return TargetEnvironment == NativeEnvironment ? "tvos" : "tvossim";
+ return TargetEnvironment == NativeEnvironment || IgnoreSim ? "tvos"
+ : "tvossim";
case DarwinPlatformKind::WatchOS:
- return TargetEnvironment == NativeEnvironment ? "watchos" : "watchossim";
+ return TargetEnvironment == NativeEnvironment || IgnoreSim ? "watchos"
+ : "watchossim";
}
llvm_unreachable("Unsupported platform");
}
@@ -1022,18 +1034,24 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
ArgStringList &CmdArgs) const {
if (!needsProfileRT(Args)) return;
- AddLinkRuntimeLib(
- Args, CmdArgs,
- (Twine("libclang_rt.profile_") + getOSLibraryNameSuffix() + ".a").str(),
- RuntimeLinkOptions(RLO_AlwaysLink | RLO_FirstLink));
+ AddLinkRuntimeLib(Args, CmdArgs, "profile",
+ RuntimeLinkOptions(RLO_AlwaysLink | RLO_FirstLink));
// If we have a symbol export directive and we're linking in the profile
// runtime, automatically export symbols necessary to implement some of the
// runtime's functionality.
if (hasExportSymbolDirective(Args)) {
- addExportedSymbol(CmdArgs, "___llvm_profile_filename");
- addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
- addExportedSymbol(CmdArgs, "_lprofCurFilename");
+ if (needsGCovInstrumentation(Args)) {
+ addExportedSymbol(CmdArgs, "___gcov_flush");
+ addExportedSymbol(CmdArgs, "_flush_fn_list");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ } else {
+ addExportedSymbol(CmdArgs, "___llvm_profile_filename");
+ addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
+ addExportedSymbol(CmdArgs, "_lprofCurFilename");
+ addExportedSymbol(CmdArgs, "_lprofMergeValueProfData");
+ }
+ addExportedSymbol(CmdArgs, "_lprofDirMode");
}
}
@@ -1042,12 +1060,7 @@ void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
StringRef Sanitizer,
bool Shared) const {
auto RLO = RuntimeLinkOptions(RLO_AlwaysLink | (Shared ? RLO_AddRPath : 0U));
- AddLinkRuntimeLib(Args, CmdArgs,
- (Twine("libclang_rt.") + Sanitizer + "_" +
- getOSLibraryNameSuffix() +
- (Shared ? "_dynamic.dylib" : ".a"))
- .str(),
- RLO);
+ AddLinkRuntimeLib(Args, CmdArgs, Sanitizer, RLO, Shared);
}
ToolChain::RuntimeLibType DarwinClang::GetRuntimeLibType(
@@ -1101,66 +1114,33 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
AddCXXStdlibLibArgs(Args, CmdArgs);
}
if (Sanitize.needsStatsRt()) {
- StringRef OS = isTargetMacOS() ? "osx" : "iossim";
- AddLinkRuntimeLib(Args, CmdArgs,
- (Twine("libclang_rt.stats_client_") + OS + ".a").str(),
- RLO_AlwaysLink);
+ AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
}
if (Sanitize.needsEsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "esan");
+ const XRayArgs &XRay = getXRayArgs();
+ if (XRay.needsXRayRt()) {
+ AddLinkRuntimeLib(Args, CmdArgs, "xray");
+ AddLinkRuntimeLib(Args, CmdArgs, "xray-basic");
+ AddLinkRuntimeLib(Args, CmdArgs, "xray-fdr");
+ }
+
// Otherwise link libSystem, then the dynamic runtime library, and finally any
// target specific static runtime library.
CmdArgs.push_back("-lSystem");
// Select the dynamic runtime library and the target specific static library.
- if (isTargetWatchOSBased()) {
- // We currently always need a static runtime library for watchOS.
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.watchos.a");
- } else if (isTargetTvOSBased()) {
- // We currently always need a static runtime library for tvOS.
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.tvos.a");
- } else if (isTargetIOSBased()) {
+ if (isTargetIOSBased()) {
// If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
// it never went into the SDK.
// Linking against libgcc_s.1 isn't needed for iOS 5.0+
if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator() &&
getTriple().getArch() != llvm::Triple::aarch64)
CmdArgs.push_back("-lgcc_s.1");
-
- // We currently always need a static runtime library for iOS.
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ios.a");
- } else {
- assert(isTargetMacOS() && "unexpected non MacOS platform");
- // The dynamic runtime library was merged with libSystem for 10.6 and
- // beyond; only 10.4 and 10.5 need an additional runtime library.
- if (isMacosxVersionLT(10, 5))
- CmdArgs.push_back("-lgcc_s.10.4");
- else if (isMacosxVersionLT(10, 6))
- CmdArgs.push_back("-lgcc_s.10.5");
-
- // Originally for OS X, we thought we would only need a static runtime
- // library when targeting 10.4, to provide versions of the static functions
- // which were omitted from 10.4.dylib. This led to the creation of the 10.4
- // builtins library.
- //
- // Unfortunately, that turned out to not be true, because Darwin system
- // headers can still use eprintf on i386, and it is not exported from
- // libSystem. Therefore, we still must provide a runtime library just for
- // the tiny tiny handful of projects that *might* use that symbol.
- //
- // Then over time, we figured out it was useful to add more things to the
- // runtime so we created libclang_rt.osx.a to provide new functions when
- // deploying to old OS builds, and for a long time we had both eprintf and
- // osx builtin libraries. Which just seems excessive. So with PR 28855, we
- // are removing the eprintf library and expecting eprintf to be provided by
- // the OS X builtins library.
- if (isMacosxVersionLT(10, 5))
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.10.4.a");
- else
- AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.osx.a");
}
+ AddLinkRuntimeLib(Args, CmdArgs, "builtins");
}
/// Returns the most appropriate macOS target version for the current process.
@@ -1314,6 +1294,18 @@ struct DarwinPlatform {
return DarwinPlatform(InferredFromArch, getPlatformFromOS(OS), Value);
}
+ /// Constructs an inferred SDKInfo value based on the version inferred from
+ /// the SDK path itself. Only works for values that were created by inferring
+ /// the platform from the SDKPath.
+ DarwinSDKInfo inferSDKInfo() {
+ assert(Kind == InferredFromSDK && "can infer SDK info only");
+ llvm::VersionTuple Version;
+ bool IsValid = !Version.tryParse(OSVersion);
+ (void)IsValid;
+ assert(IsValid && "invalid SDK version");
+ return DarwinSDKInfo(Version);
+ }
+
private:
DarwinPlatform(SourceKind Kind, DarwinPlatformKind Platform, Arg *Argument)
: Kind(Kind), Platform(Platform), Argument(Argument) {}
@@ -1447,8 +1439,11 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
}
/// Tries to infer the deployment target from the SDK specified by -isysroot
-/// (or SDKROOT).
-Optional<DarwinPlatform> inferDeploymentTargetFromSDK(DerivedArgList &Args) {
+/// (or SDKROOT). Uses the version specified in the SDKSettings.json file if
+/// it's available.
+Optional<DarwinPlatform>
+inferDeploymentTargetFromSDK(DerivedArgList &Args,
+ const Optional<DarwinSDKInfo> &SDKInfo) {
const Arg *A = Args.getLastArg(options::OPT_isysroot);
if (!A)
return None;
@@ -1456,28 +1451,37 @@ Optional<DarwinPlatform> inferDeploymentTargetFromSDK(DerivedArgList &Args) {
StringRef SDK = Darwin::getSDKName(isysroot);
if (!SDK.size())
return None;
- // Slice the version number out.
- // Version number is between the first and the last number.
- size_t StartVer = SDK.find_first_of("0123456789");
- size_t EndVer = SDK.find_last_of("0123456789");
- if (StartVer != StringRef::npos && EndVer > StartVer) {
- StringRef Version = SDK.slice(StartVer, EndVer + 1);
- if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::IPhoneOS, Version,
- /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
- else if (SDK.startswith("MacOSX"))
- return DarwinPlatform::createFromSDK(Darwin::MacOS,
- getSystemOrSDKMacOSVersion(Version));
- else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::WatchOS, Version,
- /*IsSimulator=*/SDK.startswith("WatchSimulator"));
- else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::TvOS, Version,
- /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
- }
+
+ std::string Version;
+ if (SDKInfo) {
+ // Get the version from the SDKSettings.json if it's available.
+ Version = SDKInfo->getVersion().getAsString();
+ } else {
+ // Slice the version number out.
+ // Version number is between the first and the last number.
+ size_t StartVer = SDK.find_first_of("0123456789");
+ size_t EndVer = SDK.find_last_of("0123456789");
+ if (StartVer != StringRef::npos && EndVer > StartVer)
+ Version = SDK.slice(StartVer, EndVer + 1);
+ }
+ if (Version.empty())
+ return None;
+
+ if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::IPhoneOS, Version,
+ /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
+ else if (SDK.startswith("MacOSX"))
+ return DarwinPlatform::createFromSDK(Darwin::MacOS,
+ getSystemOrSDKMacOSVersion(Version));
+ else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::WatchOS, Version,
+ /*IsSimulator=*/SDK.startswith("WatchSimulator"));
+ else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::TvOS, Version,
+ /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
return None;
}
@@ -1552,6 +1556,22 @@ Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
Args.getLastArg(options::OPT_target));
}
+Optional<DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS,
+ const ArgList &Args,
+ const Driver &TheDriver) {
+ const Arg *A = Args.getLastArg(options::OPT_isysroot);
+ if (!A)
+ return None;
+ StringRef isysroot = A->getValue();
+ auto SDKInfoOrErr = driver::parseDarwinSDKInfo(VFS, isysroot);
+ if (!SDKInfoOrErr) {
+ llvm::consumeError(SDKInfoOrErr.takeError());
+ TheDriver.Diag(diag::warn_drv_darwin_sdk_invalid_settings);
+ return None;
+ }
+ return *SDKInfoOrErr;
+}
+
} // namespace
void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
@@ -1576,6 +1596,10 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
+ // Read the SDKSettings.json file for more information, like the SDK version
+ // that we can pass down to the compiler.
+ SDKInfo = parseSDKSettings(getVFS(), Args, getDriver());
+
// The OS and the version can be specified using the -target argument.
Optional<DarwinPlatform> OSTarget =
getDeploymentTargetFromTargetArg(Args, getTriple(), getDriver());
@@ -1621,16 +1645,22 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
getDeploymentTargetFromEnvironmentVariables(getDriver(), getTriple());
if (OSTarget) {
// Don't infer simulator from the arch when the SDK is also specified.
- Optional<DarwinPlatform> SDKTarget = inferDeploymentTargetFromSDK(Args);
+ Optional<DarwinPlatform> SDKTarget =
+ inferDeploymentTargetFromSDK(Args, SDKInfo);
if (SDKTarget)
OSTarget->setEnvironment(SDKTarget->getEnvironment());
}
}
// If there is no command-line argument to specify the Target version and
// no environment variable defined, see if we can set the default based
- // on -isysroot.
- if (!OSTarget)
- OSTarget = inferDeploymentTargetFromSDK(Args);
+ // on -isysroot using SDKSettings.json if it exists.
+ if (!OSTarget) {
+ OSTarget = inferDeploymentTargetFromSDK(Args, SDKInfo);
+ /// If the target was successfully constructed from the SDK path, try to
+ /// infer the SDK info if the SDK doesn't have it.
+ if (OSTarget && !SDKInfo)
+ SDKInfo = OSTarget->inferSDKInfo();
+ }
// If no OS targets have been specified, try to guess platform from -target
// or arch name and compute the version from the triple.
if (!OSTarget)
@@ -1709,6 +1739,39 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
}
}
+void DarwinClang::AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // The implementation from a base class will pass through the -stdlib to
+ // CC1Args.
+ // FIXME: this should not be necessary, remove usages in the frontend
+ // (e.g. HeaderSearchOptions::UseLibcxx) and don't pipe -stdlib.
+ ToolChain::AddClangCXXStdlibIncludeArgs(DriverArgs, CC1Args);
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ llvm::StringRef InstallDir = getDriver().getInstalledDir();
+ if (InstallDir.empty())
+ break;
+ // On Darwin, libc++ may be installed alongside the compiler in
+ // include/c++/v1.
+ // Get from 'foo/bin' to 'foo/include/c++/v1'.
+ SmallString<128> P = InstallDir;
+ // Note that InstallDir can be relative, so we have to use '..' rather than
+ // parent_path.
+ llvm::sys::path::append(P, "..", "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ break;
+ }
+ case ToolChain::CST_Libstdcxx:
+ // FIXME: should we do something about it?
+ break;
+ }
+}
void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
@@ -1962,12 +2025,8 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
else if (Name == "pentIIm3")
DAL->AddJoinedArg(nullptr, MArch, "pentium2");
- else if (Name == "x86_64")
- DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_m64));
- else if (Name == "x86_64h") {
+ else if (Name == "x86_64" || Name == "x86_64h")
DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_m64));
- DAL->AddJoinedArg(nullptr, MArch, "x86_64h");
- }
else if (Name == "arm")
DAL->AddJoinedArg(nullptr, MArch, "armv4t");
@@ -2001,12 +2060,12 @@ void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
// Embedded targets are simple at the moment, not supporting sanitizers and
// with different libraries for each member of the product { static, PIC } x
// { hard-float, soft-float }
- llvm::SmallString<32> CompilerRT = StringRef("libclang_rt.");
+ llvm::SmallString<32> CompilerRT = StringRef("");
CompilerRT +=
(tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard)
? "hard"
: "soft";
- CompilerRT += Args.hasArg(options::OPT_fPIC) ? "_pic.a" : "_static.a";
+ CompilerRT += Args.hasArg(options::OPT_fPIC) ? "_pic" : "_static";
AddLinkRuntimeLib(Args, CmdArgs, CompilerRT, RLO_IsEmbedded);
}
@@ -2035,8 +2094,21 @@ bool Darwin::isAlignedAllocationUnavailable() const {
void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const {
- if (isAlignedAllocationUnavailable())
+ // Pass "-faligned-alloc-unavailable" only when the user hasn't manually
+ // enabled or disabled aligned allocations.
+ if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
+ options::OPT_fno_aligned_allocation) &&
+ isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
+
+ if (SDKInfo) {
+ /// Pass the SDK version to the compiler when the SDK information is
+ /// available.
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-target-sdk-version=" << SDKInfo->getVersion();
+ CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ }
}
DerivedArgList *
@@ -2131,7 +2203,7 @@ llvm::ExceptionHandling Darwin::GetExceptionModel(const ArgList &Args) const {
// Only watchOS uses the new DWARF/Compact unwinding method.
llvm::Triple Triple(ComputeLLVMTriple(Args));
- if(Triple.isWatchABI())
+ if (Triple.isWatchABI())
return llvm::ExceptionHandling::DwarfCFI;
return llvm::ExceptionHandling::SjLj;
@@ -2268,8 +2340,7 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
}
if (!isTargetIPhoneOS() && Args.hasArg(options::OPT_shared_libgcc) &&
- !isTargetWatchOS() &&
- isMacosxVersionLT(10, 5)) {
+ !isTargetWatchOS() && isMacosxVersionLT(10, 5)) {
const char *Str = Args.MakeArgString(GetFilePath("crt3.o"));
CmdArgs.push_back(Str);
}
@@ -2290,10 +2361,15 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
Res |= SanitizerKind::Function;
+
+ // Prior to 10.9, macOS shipped a version of the C++ standard library without
+ // C++11 support. The same is true of iOS prior to version 5. These OSes are
+ // incompatible with -fsanitize=vptr.
+ if (!(isTargetMacOS() && isMacosxVersionLT(10, 9))
+ && !(isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0)))
+ Res |= SanitizerKind::Vptr;
+
if (isTargetMacOS()) {
- if (!isMacosxVersionLT(10, 9))
- Res |= SanitizerKind::Vptr;
- Res |= SanitizerKind::SafeStack;
if (IsX86_64)
Res |= SanitizerKind::Thread;
} else if (isTargetIOSSimulator() || isTargetTvOSSimulator()) {
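To make the component-based naming introduced in MachO::AddLinkRuntimeLib concrete, here is a minimal sketch (the helper buildDarwinRTLibName is hypothetical and only mirrors the string assembly in the hunk above; Suffix stands for whatever getOSLibraryNameSuffix() would return, e.g. "osx" or "iossim"):

#include <iostream>
#include <string>

// Hypothetical mirror of the name assembly in MachO::AddLinkRuntimeLib.
static std::string buildDarwinRTLibName(const std::string &Component,
                                        const std::string &Suffix,
                                        bool IsShared, bool IsEmbedded = false) {
  std::string Name = "libclang_rt.";
  if (Component != "builtins") {
    Name += Component;
    if (!IsEmbedded)
      Name += "_";
    Name += Suffix;
  } else {
    // The builtins component is not spelled out in the library name.
    Name += Suffix;
  }
  Name += IsShared ? "_dynamic.dylib" : ".a";
  return Name;
}

int main() {
  std::cout << buildDarwinRTLibName("profile", "osx", false) << "\n";
  // -> libclang_rt.profile_osx.a
  std::cout << buildDarwinRTLibName("asan", "iossim", true) << "\n";
  // -> libclang_rt.asan_iossim_dynamic.dylib
  std::cout << buildDarwinRTLibName("builtins", "osx", false) << "\n";
  // -> libclang_rt.osx.a
  return 0;
}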
diff --git a/lib/Driver/ToolChains/Darwin.h b/lib/Driver/ToolChains/Darwin.h
index 87d553bd7e0b..d753f8967a61 100644
--- a/lib/Driver/ToolChains/Darwin.h
+++ b/lib/Driver/ToolChains/Darwin.h
@@ -11,8 +11,10 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
#include "Cuda.h"
+#include "clang/Driver/DarwinSDKInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/XRayArgs.h"
namespace clang {
namespace driver {
@@ -189,9 +191,9 @@ public:
/// Add a runtime library to the list of items to link.
void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- StringRef DarwinLibName,
- RuntimeLinkOptions Opts = RuntimeLinkOptions()) const;
+ llvm::opt::ArgStringList &CmdArgs, StringRef Component,
+ RuntimeLinkOptions Opts = RuntimeLinkOptions(),
+ bool IsShared = false) const;
/// Add any profiling runtime libraries that are needed. This is essentially a
/// MachO specific version of addProfileRT in Tools.cpp.
@@ -252,6 +254,10 @@ public:
return llvm::ExceptionHandling::None;
}
+ virtual StringRef getOSLibraryNameSuffix(bool IgnoreSim = false) const {
+ return "";
+ }
+
/// }
};
@@ -283,6 +289,9 @@ public:
/// The OS version we are targeting.
mutable VersionTuple TargetVersion;
+ /// The information about the darwin SDK that was used.
+ mutable Optional<DarwinSDKInfo> SDKInfo;
+
CudaInstallationDetector CudaInstallation;
private:
@@ -418,7 +427,7 @@ protected:
Action::OffloadKind DeviceOffloadKind) const override;
StringRef getPlatformFamily() const;
- StringRef getOSLibraryNameSuffix() const;
+ StringRef getOSLibraryNameSuffix(bool IgnoreSim = false) const override;
public:
static StringRef getSDKName(StringRef isysroot);
@@ -489,6 +498,10 @@ public:
void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
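For illustration, a minimal sketch of how the version stored in the new SDKInfo member ends up on the cc1 command line as -target-sdk-version (assumes an LLVM build environment where VersionTuple lives in llvm/Support; the 10.14 value is a made-up example, as if read from SDKSettings.json):

#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

int main() {
  // Hypothetical SDK version, as if parsed from the SDK's SDKSettings.json.
  llvm::VersionTuple SDKVersion(10, 14);
  std::string Arg;
  llvm::raw_string_ostream OS(Arg);
  OS << "-target-sdk-version=" << SDKVersion;
  llvm::outs() << OS.str() << "\n"; // prints: -target-sdk-version=10.14
  return 0;
}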
diff --git a/lib/Driver/ToolChains/FreeBSD.cpp b/lib/Driver/ToolChains/FreeBSD.cpp
index c16eabf06961..7a176d260aee 100644
--- a/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/lib/Driver/ToolChains/FreeBSD.cpp
@@ -12,11 +12,11 @@
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/lib/Driver/ToolChains/Fuchsia.cpp b/lib/Driver/ToolChains/Fuchsia.cpp
index 54c34ff159b1..de2c7411c5e4 100644
--- a/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/lib/Driver/ToolChains/Fuchsia.cpp
@@ -76,10 +76,11 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else if (Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-shared");
+ const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs();
+
if (!Args.hasArg(options::OPT_shared)) {
std::string Dyld = D.DyldPrefix;
- if (ToolChain.getSanitizerArgs().needsAsanRt() &&
- ToolChain.getSanitizerArgs().needsSharedRt())
+ if (SanArgs.needsAsanRt() && SanArgs.needsSharedRt())
Dyld += "asan/";
Dyld += "ld.so.1";
CmdArgs.push_back("-dynamic-linker");
@@ -98,6 +99,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
+ addSanitizerPathLibArgs(ToolChain, Args, CmdArgs);
+
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
@@ -106,8 +109,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.getLTOMode() == LTOK_Thin);
}
- addSanitizerRuntimes(ToolChain, Args, CmdArgs);
-
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
ToolChain.addProfileRTLibs(Args, CmdArgs);
@@ -119,15 +122,24 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (ToolChain.ShouldLinkCXXStdlib(Args)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
+ CmdArgs.push_back("--push-state");
+ CmdArgs.push_back("--as-needed");
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bstatic");
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ CmdArgs.push_back("--pop-state");
}
- CmdArgs.push_back("-lm");
}
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(ToolChain, CmdArgs);
+
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
if (Args.hasArg(options::OPT_pthread) ||
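The C++ standard library portion of the Fuchsia link line is now wrapped in a linker state push/pop, roughly as in this illustrative sketch ("-lc++" stands in for whatever AddCXXStdlibLibArgs actually emits, and -Bstatic/-Bdynamic appear only with -static-libstdc++ and without -static):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Sketch of the argument ordering produced by the hunk above when linking
  // C++ with -static-libstdc++ (and without -static).
  std::vector<std::string> CmdArgs = {
      "--push-state", "--as-needed",
      "-Bstatic", "-lc++", "-Bdynamic", // C++ standard library
      "-lm",                            // now inside the push/pop pair
      "--pop-state",
  };
  for (const std::string &A : CmdArgs)
    std::cout << A << ' ';
  std::cout << '\n';
  return 0;
}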
diff --git a/lib/Driver/ToolChains/Gnu.cpp b/lib/Driver/ToolChains/Gnu.cpp
index 3755673250b2..2ad45097dce8 100644
--- a/lib/Driver/ToolChains/Gnu.cpp
+++ b/lib/Driver/ToolChains/Gnu.cpp
@@ -16,7 +16,6 @@
#include "Arch/SystemZ.h"
#include "CommonArgs.h"
#include "Linux.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -27,6 +26,7 @@
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -228,6 +228,30 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
+// On Arm the endianness of the output file is determined by the target and
+// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
+// '-mbig-endian'/'-EB'. Unlike other targets, the flag does not result in a
+// normalized triple, so we must handle it here.
+static bool isArmBigEndian(const llvm::Triple &Triple,
+ const ArgList &Args) {
+ bool IsBigEndian = false;
+ switch (Triple.getArch()) {
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ IsBigEndian = true;
+ LLVM_FALLTHROUGH;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian))
+ IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ break;
+ default:
+ break;
+ }
+ return IsBigEndian;
+}
+
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
@@ -237,13 +261,12 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::aarch64:
return "aarch64linux";
case llvm::Triple::aarch64_be:
- return "aarch64_be_linux";
+ return "aarch64linuxb";
case llvm::Triple::arm:
case llvm::Triple::thumb:
- return "armelf_linux_eabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- return "armelfb_linux_eabi";
+ return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
case llvm::Triple::ppc:
return "elf32ppclinux";
case llvm::Triple::ppc64:
@@ -264,11 +287,13 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::mipsel:
return "elf32ltsmip";
case llvm::Triple::mips64:
- if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ if (tools::mips::hasMipsAbiArg(Args, "n32") ||
+ T.getEnvironment() == llvm::Triple::GNUABIN32)
return "elf32btsmipn32";
return "elf64btsmip";
case llvm::Triple::mips64el:
- if (tools::mips::hasMipsAbiArg(Args, "n32"))
+ if (tools::mips::hasMipsAbiArg(Args, "n32") ||
+ T.getEnvironment() == llvm::Triple::GNUABIN32)
return "elf32ltsmipn32";
return "elf64ltsmip";
case llvm::Triple::systemz:
@@ -323,14 +348,6 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
- const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- if (llvm::sys::path::stem(Exec) == "lld") {
- CmdArgs.push_back("-flavor");
- CmdArgs.push_back("old-gnu");
- CmdArgs.push_back("-target");
- CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
- }
-
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
@@ -343,8 +360,13 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
- if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
- arm::appendEBLinkFlags(Args, CmdArgs, Triple);
+ if (Triple.isARM() || Triple.isThumb() || Triple.isAArch64()) {
+ bool IsBigEndian = isArmBigEndian(Triple, Args);
+ if (IsBigEndian)
+ arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
+ IsBigEndian = IsBigEndian || Arch == llvm::Triple::aarch64_be;
+ CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ }
// Most Android ARM64 targets should enable the linker fix for erratum
// 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
@@ -539,6 +561,7 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddHIPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA,
*this);
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -645,6 +668,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
const llvm::Triple &Triple2 = getToolChain().getTriple();
+ CmdArgs.push_back(isArmBigEndian(Triple2, Args) ? "-EB" : "-EL");
switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
@@ -677,6 +701,8 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
}
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be: {
+ CmdArgs.push_back(
+ getToolChain().getArch() == llvm::Triple::aarch64_be ? "-EB" : "-EL");
Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
normalizeCPUNamesForAssembler(Args, CmdArgs);
@@ -791,17 +817,17 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
if (Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux())
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0]));
+ SplitDebugName(Args, Output));
}
namespace {
// Filter to remove Multilibs that don't exist as a suffix to Path
class FilterNonExistent {
StringRef Base, File;
- vfs::FileSystem &VFS;
+ llvm::vfs::FileSystem &VFS;
public:
- FilterNonExistent(StringRef Base, StringRef File, vfs::FileSystem &VFS)
+ FilterNonExistent(StringRef Base, StringRef File, llvm::vfs::FileSystem &VFS)
: Base(Base), File(File), VFS(VFS) {}
bool operator()(const Multilib &M) {
return !VFS.exists(Base + M.gccSuffix() + File);
@@ -852,6 +878,10 @@ static bool isRISCV(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
}
+static bool isMSP430(llvm::Triple::ArchType Arch) {
+ return Arch == llvm::Triple::msp430;
+}
+
static Multilib makeMultilib(StringRef commonSuffix) {
return Multilib(commonSuffix, commonSuffix, commonSuffix);
}
@@ -947,7 +977,7 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
return false;
}
-static bool findMipsAndroidMultilibs(vfs::FileSystem &VFS, StringRef Path,
+static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
@@ -1397,6 +1427,26 @@ static void findAndroidArmMultilibs(const Driver &D,
Result.Multilibs = AndroidArmMultilibs;
}
+static bool findMSP430Multilibs(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
+ Multilib MSP430Multilib = makeMultilib("/430");
+ // FIXME: when clang starts to support the msp430x ISA, additional logic
+ // to select between multilibs must be implemented.
+ // Multilib MSP430xMultilib = makeMultilib("/large");
+
+ Result.Multilibs.push_back(MSP430Multilib);
+ Result.Multilibs.FilterOut(NonExistent);
+
+ Multilib::flags_list Flags;
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ return true;
+
+ return false;
+}
+
static void findRISCVMultilibs(const Driver &D,
const llvm::Triple &TargetTriple, StringRef Path,
const ArgList &Args, DetectedMultilibs &Result) {
@@ -1625,10 +1675,18 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
return GoodVersion;
}
-static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
+static llvm::StringRef getGCCToolchainDir(const ArgList &Args,
+ llvm::StringRef SysRoot) {
const Arg *A = Args.getLastArg(clang::driver::options::OPT_gcc_toolchain);
if (A)
return A->getValue();
+
+ // If we have a SysRoot, ignore GCC_INSTALL_PREFIX.
+ // GCC_INSTALL_PREFIX specifies the gcc installation for the default
+ // sysroot and is likely not valid with a different sysroot.
+ if (!SysRoot.empty())
+ return "";
+
return GCC_INSTALL_PREFIX;
}
@@ -1660,7 +1718,7 @@ void Generic_GCC::GCCInstallationDetector::init(
SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
D.PrefixDirs.end());
- StringRef GCCToolchainDir = getGCCToolchainDir(Args);
+ StringRef GCCToolchainDir = getGCCToolchainDir(Args, D.SysRoot);
if (GCCToolchainDir != "") {
if (GCCToolchainDir.back() == '/')
GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
@@ -1769,9 +1827,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
std::string PrefixDir = SysRoot.str() + "/usr/gcc";
std::error_code EC;
- for (vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC), LE;
+ for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC),
+ LE;
!EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
// Filter out obviously bad entries.
@@ -1812,19 +1871,21 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
"aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
- "aarch64-suse-linux"};
+ "aarch64-suse-linux", "aarch64-linux-android"};
static const char *const AArch64beLibDirs[] = {"/lib"};
static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
"aarch64_be-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
- static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
+ static const char *const ARMTriples[] = {"arm-linux-gnueabi",
+ "arm-linux-androideabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
- static const char *const ARMebTriples[] = {"armeb-linux-gnueabi"};
+ static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
+ "armeb-linux-androideabi"};
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
@@ -1835,32 +1896,47 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
- "x86_64-amazon-linux"};
+ "x86_64-amazon-linux", "x86_64-linux-android"};
static const char *const X32LibDirs[] = {"/libx32"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
"i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
"i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
"i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
- "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu"};
+ "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu",
+ "i686-linux-android", "i386-gnu", "i486-gnu",
+ "i586-gnu", "i686-gnu"};
static const char *const MIPSLibDirs[] = {"/lib"};
- static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
- "mips-mti-linux-gnu",
- "mips-img-linux-gnu"};
+ static const char *const MIPSTriples[] = {
+ "mips-linux-gnu", "mips-mti-linux", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mipsisa32r6-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/lib"};
- static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
- "mips-img-linux-gnu"};
+ static const char *const MIPSELTriples[] = {
+ "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu",
+ "mipsel-linux-android"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
- "mips64-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
- "mips64-linux-gnuabi64"};
+ "mips64-linux-gnu", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mips64-linux-gnuabi64",
+ "mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64"};
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
- "mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
- "mips64el-linux-gnuabi64"};
+ "mips64el-linux-gnu", "mips-mti-linux-gnu",
+ "mips-img-linux-gnu", "mips64el-linux-gnuabi64",
+ "mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64",
+ "mips64el-linux-android"};
+
+ static const char *const MIPSN32LibDirs[] = {"/lib32"};
+ static const char *const MIPSN32Triples[] = {"mips64-linux-gnuabin32",
+ "mipsisa64r6-linux-gnuabin32"};
+ static const char *const MIPSN32ELLibDirs[] = {"/lib32"};
+ static const char *const MIPSN32ELTriples[] = {
+ "mips64el-linux-gnuabin32", "mipsisa64r6el-linux-gnuabin32"};
+ static const char *const MSP430LibDirs[] = {"/lib"};
+ static const char *const MSP430Triples[] = {"msp430-elf"};
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
@@ -2057,6 +2133,8 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
+ BiarchLibDirs.append(begin(MIPSN32LibDirs), end(MIPSN32LibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32Triples), end(MIPSN32Triples));
break;
case llvm::Triple::mipsel:
LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
@@ -2064,20 +2142,30 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSN32ELLibDirs), end(MIPSN32ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32ELTriples), end(MIPSN32ELTriples));
break;
case llvm::Triple::mips64:
LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
TripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
BiarchLibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPSN32LibDirs), end(MIPSN32LibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32Triples), end(MIPSN32Triples));
break;
case llvm::Triple::mips64el:
LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchLibDirs.append(begin(MIPSN32ELLibDirs), end(MIPSN32ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSN32ELTriples), end(MIPSN32ELTriples));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
+ case llvm::Triple::msp430:
+ LibDirs.append(begin(MSP430LibDirs), end(MSP430LibDirs));
+ TripleAliases.append(begin(MSP430Triples), end(MSP430Triples));
+ break;
case llvm::Triple::ppc:
LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
TripleAliases.append(begin(PPCTriples), end(PPCTriples));
@@ -2149,6 +2237,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
return false;
} else if (isRISCV(TargetArch)) {
findRISCVMultilibs(D, TargetTriple, Path, Args, Detected);
+ } else if (isMSP430(TargetArch)) {
+ findMSP430Multilibs(D, TargetTriple, Path, Args, Detected);
} else if (!findBiarchMultilibs(D, TargetTriple, Path, Args,
NeedsBiarchSuffix, Detected)) {
return false;
@@ -2204,6 +2294,9 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// triple.
{"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
(TargetArch == llvm::Triple::x86 &&
+ TargetTriple.getOS() != llvm::Triple::Solaris)},
+ {"i386-gnu/gcc/" + CandidateTriple.str(), "../../..",
+ (TargetArch == llvm::Triple::x86 &&
TargetTriple.getOS() != llvm::Triple::Solaris)}};
for (auto &Suffix : Suffixes) {
@@ -2212,21 +2305,21 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
StringRef LibSuffix = Suffix.LibSuffix;
std::error_code EC;
- for (vfs::directory_iterator
+ for (llvm::vfs::directory_iterator
LI = D.getVFS().dir_begin(LibDir + "/" + LibSuffix, EC),
LE;
!EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->getName());
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
if (CandidateVersion.Major != -1) // Filter obviously bad entries.
- if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
+ if (!CandidateGCCInstallPaths.insert(LI->path()).second)
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
if (CandidateVersion <= Version)
continue;
- if (!ScanGCCForMultilibs(TargetTriple, Args, LI->getName(),
+ if (!ScanGCCForMultilibs(TargetTriple, Args, LI->path(),
NeedsBiarchSuffix))
continue;
@@ -2375,7 +2468,7 @@ bool Generic_GCC::isPICDefault() const {
case llvm::Triple::x86_64:
return getTriple().isOSWindows();
case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
+ // Big endian PPC is PIC by default
return !getTriple().isOSBinFormatMachO() && !getTriple().isMacOSX();
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
@@ -2412,16 +2505,14 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::systemz:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return true;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
- // Enabled for Debian, Android, FreeBSD and OpenBSD mips64/mipsel, as they
- // can precisely identify the ABI in use (Debian) or only use N64 for MIPS64
- // (Android). Other targets are unable to distinguish N32 from N64.
- if (getTriple().getEnvironment() == llvm::Triple::GNUABI64 ||
- getTriple().isAndroid() ||
- getTriple().isOSFreeBSD() ||
- getTriple().isOSOpenBSD())
+ case llvm::Triple::msp430:
+ return true;
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9:
+ if (getTriple().isOSSolaris() || getTriple().isOSOpenBSD())
return true;
return false;
default:
@@ -2546,7 +2637,7 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
bool UseInitArrayDefault =
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
- (getTriple().getOS() == llvm::Triple::FreeBSD &&
+ (getTriple().isOSFreeBSD() &&
getTriple().getOSMajorVersion() >= 12) ||
(getTriple().getOS() == llvm::Triple::Linux &&
((!GCCInstallation.isValid() || !V.isOlderThan(4, 7, 0)) ||
@@ -2554,7 +2645,9 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
getTriple().getOS() == llvm::Triple::NaCl ||
(getTriple().getVendor() == llvm::Triple::MipsTechnologies &&
!getTriple().hasEnvironment()) ||
- getTriple().getOS() == llvm::Triple::Solaris;
+ getTriple().getOS() == llvm::Triple::Solaris ||
+ getTriple().getArch() == llvm::Triple::riscv32 ||
+ getTriple().getArch() == llvm::Triple::riscv64;
if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, UseInitArrayDefault))
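A condensed sketch of the new endianness selection (standalone and illustrative only; pickArmEndianFlag is hypothetical and collapses isArmBigEndian plus the -EB/-EL emission at the linker and assembler call sites above into one function):

#include <cstdio>

// Hypothetical condensation of isArmBigEndian(): the default comes from the
// triple (armeb/thumbeb are the big-endian variants), and the last of
// -mbig-endian/-mlittle-endian, if given, overrides it.
static const char *pickArmEndianFlag(bool ArchIsBigEndianVariant,
                                     bool HasEndianFlag,
                                     bool LastFlagIsBigEndian) {
  bool IsBigEndian = ArchIsBigEndianVariant;
  if (HasEndianFlag)
    IsBigEndian = LastFlagIsBigEndian;
  return IsBigEndian ? "-EB" : "-EL";
}

int main() {
  std::printf("%s\n", pickArmEndianFlag(true, false, false));  // armeb default: -EB
  std::printf("%s\n", pickArmEndianFlag(true, true, false));   // armeb -mlittle-endian: -EL
  std::printf("%s\n", pickArmEndianFlag(false, true, true));   // arm -mbig-endian: -EB
  return 0;
}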
diff --git a/lib/Driver/ToolChains/HIP.cpp b/lib/Driver/ToolChains/HIP.cpp
index 03acf45a9b31..868765cf88e5 100644
--- a/lib/Driver/ToolChains/HIP.cpp
+++ b/lib/Driver/ToolChains/HIP.cpp
@@ -24,6 +24,12 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+#if _WIN32 || _WIN64
+#define NULL_FILE "nul"
+#else
+#define NULL_FILE "/dev/null"
+#endif
+
namespace {
static void addBCLib(Compilation &C, const ArgList &Args,
@@ -81,8 +87,8 @@ const char *AMDGCN::Linker::constructLLVMLinkCommand(
else
FlushDenormalControlBC = "oclc_daz_opt_off.amdgcn.bc";
- BCLibs.append({"opencl.amdgcn.bc",
- "ocml.amdgcn.bc", "ockl.amdgcn.bc", "irif.amdgcn.bc",
+ BCLibs.append({"hip.amdgcn.bc", "opencl.amdgcn.bc",
+ "ocml.amdgcn.bc", "ockl.amdgcn.bc",
"oclc_finite_only_off.amdgcn.bc",
FlushDenormalControlBC,
"oclc_correctly_rounded_sqrt_on.amdgcn.bc",
@@ -154,7 +160,7 @@ const char *AMDGCN::Linker::constructLlcCommand(
llvm::StringRef OutputFilePrefix, const char *InputFileName) const {
// Construct llc command.
ArgStringList LlcArgs{InputFileName, "-mtriple=amdgcn-amd-amdhsa",
- "-filetype=obj",
+ "-filetype=obj", "-mattr=-code-object-v3",
Args.MakeArgString("-mcpu=" + SubArchName), "-o"};
std::string LlcOutputFileName =
C.getDriver().GetTemporaryPath(OutputFilePrefix, "o");
@@ -184,6 +190,40 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Lld, LldArgs, Inputs));
}
+// Construct a clang-offload-bundler command to bundle code objects for
+// different GPUs into a HIP fat binary.
+void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, const Tool& T) {
+ // Construct clang-offload-bundler command to bundle object files for
+ // different GPU archs.
+ ArgStringList BundlerArgs;
+ BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+
+ // ToDo: Remove the dummy host binary entry which is required by
+ // clang-offload-bundler.
+ std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
+ std::string BundlerInputArg = "-inputs=" NULL_FILE;
+
+ for (const auto &II : Inputs) {
+ const auto* A = II.getAction();
+ BundlerTargetArg = BundlerTargetArg + ",hip-amdgcn-amd-amdhsa-" +
+ StringRef(A->getOffloadingArch()).str();
+ BundlerInputArg = BundlerInputArg + "," + II.getFilename();
+ }
+ BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+
+ auto BundlerOutputArg =
+ Args.MakeArgString(std::string("-outputs=").append(OutputFileName));
+ BundlerArgs.push_back(BundlerOutputArg);
+
+ SmallString<128> BundlerPath(C.getDriver().Dir);
+ llvm::sys::path::append(BundlerPath, "clang-offload-bundler");
+ const char *Bundler = Args.MakeArgString(BundlerPath);
+ C.addCommand(llvm::make_unique<Command>(JA, T, Bundler, BundlerArgs, Inputs));
+}
+
// For amdgcn the inputs of the linker job are device bitcode and output is
// object file. It calls llvm-link, opt, llc, then lld steps.
void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -192,6 +232,9 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
+ if (JA.getType() == types::TY_HIP_FATBIN)
+ return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
+
assert(getToolChain().getTriple().getArch() == llvm::Triple::amdgcn &&
"Unsupported target");
@@ -244,9 +287,15 @@ void HIPToolChain::addClangTargetOptions(
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
- if (DriverArgs.hasFlag(options::OPT_fcuda_rdc, options::OPT_fno_cuda_rdc,
+ if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
false))
- CC1Args.push_back("-fcuda-rdc");
+ CC1Args.push_back("-fgpu-rdc");
+
+ // Default to "hidden" visibility, as object level linking will not be
+ // supported for the foreseeable future.
+ if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat))
+ CC1Args.append({"-fvisibility", "hidden"});
}
llvm::opt::DerivedArgList *
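For readers following the new constructHIPFatbinCommand above, here is a minimal standalone sketch (not part of the patch) of the bundler invocation it assembles; the arch names, object files and output name are hypothetical, and NULL_FILE expands to /dev/null on non-Windows hosts as defined at the top of the file:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical inputs: two device objects compiled for two offload archs.
  std::vector<std::string> Archs = {"gfx900", "gfx906"};
  std::vector<std::string> Objs = {"a-gfx900.o", "a-gfx906.o"};
  std::string Targets = "-targets=host-x86_64-unknown-linux";
  std::string Inputs = "-inputs=/dev/null"; // dummy host entry (NULL_FILE)
  for (std::size_t I = 0; I < Archs.size(); ++I) {
    Targets += ",hip-amdgcn-amd-amdhsa-" + Archs[I];
    Inputs += "," + Objs[I];
  }
  // Roughly the command line the driver emits for the fatbin step.
  std::cout << "clang-offload-bundler -type=o " << Targets << " " << Inputs
            << " -outputs=a.hipfb\n";
}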
diff --git a/lib/Driver/ToolChains/HIP.h b/lib/Driver/ToolChains/HIP.h
index 40c9128e2f59..3af19d44dae0 100644
--- a/lib/Driver/ToolChains/HIP.h
+++ b/lib/Driver/ToolChains/HIP.h
@@ -19,6 +19,11 @@ namespace driver {
namespace tools {
namespace AMDGCN {
+ // Construct command for creating HIP fatbin.
+ void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs, const Tool& T);
+
// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode, together with
// device library, then compiles it to ISA in a shared object.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
diff --git a/lib/Driver/ToolChains/Hexagon.cpp b/lib/Driver/ToolChains/Hexagon.cpp
index c2b27b6d9ac6..d302a3e24d8b 100644
--- a/lib/Driver/ToolChains/Hexagon.cpp
+++ b/lib/Driver/ToolChains/Hexagon.cpp
@@ -8,9 +8,8 @@
//===----------------------------------------------------------------------===//
#include "Hexagon.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
+#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -19,6 +18,7 @@
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -32,6 +32,7 @@ static StringRef getDefaultHvxLength(StringRef Cpu) {
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
+ .Case("v66", "128b")
.Default("128b");
}
@@ -75,7 +76,7 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
// Handle -mhvx-length=.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
- // These falgs are valid only if HVX in enabled.
+ // These flags are valid only if HVX is enabled.
if (!HasHVX)
D.Diag(diag::err_drv_invalid_hvx_length);
else if (A->getOption().matches(options::OPT_mhexagon_hvx_length_EQ))
@@ -369,9 +370,8 @@ void hexagon::Linker::ConstructJob(Compilation &C, const JobAction &JA,
constructHexagonLinkArgs(C, JA, HTC, Output, Inputs, Args, CmdArgs,
LinkingOutput);
- std::string Linker = HTC.GetProgramPath("hexagon-link");
- C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
- CmdArgs, Inputs));
+ const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
// Hexagon tools end.
@@ -513,11 +513,6 @@ unsigned HexagonToolChain::getOptimizationLevel(
void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
- if (!DriverArgs.hasArg(options::OPT_ffp_contract)) {
- unsigned OptLevel = getOptimizationLevel(DriverArgs);
- if (OptLevel >= 3)
- CC1Args.push_back("-ffp-contract=fast");
- }
if (DriverArgs.hasArg(options::OPT_ffixed_r19)) {
CC1Args.push_back("-target-feature");
CC1Args.push_back("+reserved-r19");
diff --git a/lib/Driver/ToolChains/Hexagon.h b/lib/Driver/ToolChains/Hexagon.h
index e43b8a5b8800..a9e599de7ae5 100644
--- a/lib/Driver/ToolChains/Hexagon.h
+++ b/lib/Driver/ToolChains/Hexagon.h
@@ -81,6 +81,9 @@ public:
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
+ const char *getDefaultLinker() const override { return "hexagon-link"; }
+
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
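A quick sketch of why dropping the hard-coded hexagon-link call above is not a behavior change by default: with the getDefaultLinker() override added here, the generic linker-path lookup (shown only schematically below, as an assumption about the base ToolChain behavior of the time) still resolves to hexagon-link unless the user passes -fuse-ld=:

#include <iostream>
#include <string>

// Schematic only: -fuse-ld= takes precedence, otherwise the toolchain default.
static std::string pickLinker(const std::string &FuseLd,
                              const std::string &DefaultLinker) {
  return FuseLd.empty() ? DefaultLinker : FuseLd;
}

int main() {
  std::cout << pickLinker("", "hexagon-link") << "\n"     // default: hexagon-link
            << pickLinker("lld", "hexagon-link") << "\n"; // -fuse-ld=lld wins
}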
diff --git a/lib/Driver/ToolChains/Hurd.cpp b/lib/Driver/ToolChains/Hurd.cpp
new file mode 100644
index 000000000000..ff7b685dae3f
--- /dev/null
+++ b/lib/Driver/ToolChains/Hurd.cpp
@@ -0,0 +1,169 @@
+//===--- Hurd.cpp - Hurd ToolChain Implementations --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Hurd.h"
+#include "CommonArgs.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+
+using tools::addPathIfExists;
+
+/// Get our best guess at the multiarch triple for a target.
+///
+/// Debian-based systems are starting to use a multiarch setup where they use
+/// a target-triple directory in the library and header search paths.
+/// Unfortunately, this triple does not align with the vanilla target triple,
+/// so we provide a rough mapping here.
+static std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) {
+ if (TargetTriple.getArch() == llvm::Triple::x86) {
+ // We use the existence of '/lib/<triple>' as a directory to detect some
+ // common hurd triples that don't quite match the Clang triple for both
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
+ if (D.getVFS().exists(SysRoot + "/lib/i386-gnu"))
+ return "i386-gnu";
+ }
+
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ return TargetTriple.str();
+}
+
+static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
+ // It happens that only x86 and PPC use the 'lib32' variant of oslibdir, and
+ // using that variant while targeting other architectures causes problems
+ // because the libraries are laid out in shared system roots that can't cope
+ // with a 'lib32' library search path being considered. So we only enable
+ // them when we know we may need it.
+ //
+ // FIXME: This is a bit of a hack. We should really unify this code for
+ // reasoning about oslibdir spellings with the lib dir spellings in the
+ // GCCInstallationDetector, but that is a more significant refactoring.
+
+ if (Triple.getArch() == llvm::Triple::x86)
+ return "lib32";
+
+ return Triple.isArch32Bit() ? "lib" : "lib64";
+}
+
+Hurd::Hurd(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ std::string SysRoot = computeSysRoot();
+ path_list &Paths = getFilePaths();
+
+ const std::string OSLibDir = getOSLibDir(Triple, Args);
+ const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
+
+ // If we are currently running Clang inside of the requested system root, add
+ // its parent library paths to those searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot)) {
+ addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
+ }
+
+ addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+
+ addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+
+ // If we are currently running Clang inside of the requested system root, add
+ // its parent library path to those searched.
+ // FIXME: It's not clear whether we should use the driver's installed
+ // directory ('Dir' below) or the ResourceDir.
+ if (StringRef(D.Dir).startswith(SysRoot))
+ addPathIfExists(D, D.Dir + "/../lib", Paths);
+
+ addPathIfExists(D, SysRoot + "/lib", Paths);
+ addPathIfExists(D, SysRoot + "/usr/lib", Paths);
+}
+
+bool Hurd::HasNativeLLVMSupport() const { return true; }
+
+Tool *Hurd::buildLinker() const { return new tools::gnutools::Linker(*this); }
+
+Tool *Hurd::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+std::string Hurd::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ return std::string();
+}
+
+std::string Hurd::getDynamicLinker(const ArgList &Args) const {
+ if (getArch() == llvm::Triple::x86)
+ return "/lib/ld.so";
+
+ llvm_unreachable("unsupported architecture");
+}
+
+void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> Dirs;
+ CIncludeDirs.split(Dirs, ":");
+ for (StringRef Dir : Dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(Dir) ? StringRef(SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + Dir);
+ }
+ return;
+ }
+
+ // Lacking those, try to detect the correct set of system includes for the
+ // target triple.
+ if (getTriple().getArch() == llvm::Triple::x86) {
+ std::string Path = SysRoot + "/usr/include/i386-gnu";
+ if (D.getVFS().exists(Path))
+ addExternCSystemInclude(DriverArgs, CC1Args, Path);
+ }
+
+ // Add an include of '/include' directly. This isn't provided by default by
+ // system GCCs, but is often used with cross-compiling GCCs, and harmless to
+ // add even when Clang is acting as-if it were a system compiler.
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
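As a rough illustration of the Hurd constructor above, this standalone sketch (hypothetical sysroot; the extra paths added only when the driver itself lives inside the sysroot are omitted) prints the library search order computed for an i386-gnu target:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string SysRoot = "/gnu-sysroot"; // hypothetical
  std::string Multiarch = "i386-gnu";
  std::string OSLibDir = "lib32";       // x86 -> lib32 (see getOSLibDir)
  std::vector<std::string> Paths = {
      SysRoot + "/lib/" + Multiarch,
      SysRoot + "/lib/../" + OSLibDir,
      SysRoot + "/usr/lib/" + Multiarch,
      SysRoot + "/usr/lib/../" + OSLibDir,
      SysRoot + "/lib",
      SysRoot + "/usr/lib",
  };
  for (const std::string &P : Paths)
    std::cout << P << "\n";
}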
diff --git a/lib/Driver/ToolChains/Hurd.h b/lib/Driver/ToolChains/Hurd.h
new file mode 100644
index 000000000000..d14619f0e2ce
--- /dev/null
+++ b/lib/Driver/ToolChains/Hurd.h
@@ -0,0 +1,46 @@
+//===--- Hurd.h - Hurd ToolChain Implementations ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
+
+#include "Gnu.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY Hurd : public Generic_ELF {
+public:
+ Hurd(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool HasNativeLLVMSupport() const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ virtual std::string computeSysRoot() const;
+
+ virtual std::string getDynamicLinker(const llvm::opt::ArgList &Args) const;
+
+ std::vector<std::string> ExtraOpts;
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Hurd_H
diff --git a/lib/Driver/ToolChains/Linux.cpp b/lib/Driver/ToolChains/Linux.cpp
index f8f36239180c..65ab9b2daf54 100644
--- a/lib/Driver/ToolChains/Linux.cpp
+++ b/lib/Driver/ToolChains/Linux.cpp
@@ -13,7 +13,6 @@
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
@@ -23,6 +22,7 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>
using namespace clang::driver;
@@ -44,6 +44,7 @@ static std::string getMultiarchTriple(const Driver &D,
llvm::Triple::EnvironmentType TargetEnvironment =
TargetTriple.getEnvironment();
bool IsAndroid = TargetTriple.isAndroid();
+ bool IsMipsR6 = TargetTriple.getSubArch() == llvm::Triple::MipsSubArch_r6;
// For most architectures, just use whatever we have rather than trying to be
// clever.
@@ -101,30 +102,36 @@ static std::string getMultiarchTriple(const Driver &D,
if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
return "aarch64_be-linux-gnu";
break;
- case llvm::Triple::mips:
- if (D.getVFS().exists(SysRoot + "/lib/mips-linux-gnu"))
- return "mips-linux-gnu";
+ case llvm::Triple::mips: {
+ std::string Arch = IsMipsR6 ? "mipsisa32r6" : "mips";
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
+ return Arch + "-linux-gnu";
break;
- case llvm::Triple::mipsel:
+ }
+ case llvm::Triple::mipsel: {
if (IsAndroid)
return "mipsel-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/mipsel-linux-gnu"))
- return "mipsel-linux-gnu";
+ std::string Arch = IsMipsR6 ? "mipsisa32r6el" : "mipsel";
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-gnu"))
+ return Arch + "-linux-gnu";
break;
- case llvm::Triple::mips64:
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
- return "mips64-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnuabi64"))
- return "mips64-linux-gnuabi64";
+ }
+ case llvm::Triple::mips64: {
+ std::string Arch = IsMipsR6 ? "mipsisa64r6" : "mips64";
+ std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
+ return Arch + "-linux-" + ABI;
break;
- case llvm::Triple::mips64el:
+ }
+ case llvm::Triple::mips64el: {
if (IsAndroid)
return "mips64el-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
- return "mips64el-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnuabi64"))
- return "mips64el-linux-gnuabi64";
+ std::string Arch = IsMipsR6 ? "mipsisa64r6el" : "mips64el";
+ std::string ABI = llvm::Triple::getEnvironmentTypeName(TargetEnvironment);
+ if (D.getVFS().exists(SysRoot + "/lib/" + Arch + "-linux-" + ABI))
+ return Arch + "-linux-" + ABI;
break;
+ }
case llvm::Triple::ppc:
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
return "powerpc-linux-gnuspe";
@@ -210,6 +217,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
+ SelectedMultilib = GCCInstallation.getMultilib();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
@@ -228,16 +236,25 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
Distro Distro(D.getVFS());
- if (Distro.IsAlpineLinux()) {
+ if (Distro.IsAlpineLinux() || Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("now");
}
- if (Distro.IsOpenSUSE() || Distro.IsUbuntu() || Distro.IsAlpineLinux()) {
+ if (Distro.IsOpenSUSE() || Distro.IsUbuntu() || Distro.IsAlpineLinux() ||
+ Triple.isAndroid()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("relro");
}
+ // The lld default page size is too large for AArch64, which produces much
+ // larger .so files and images for arm64 device targets. Use 4KB page size
+ // for Android arm64 targets instead.
+ if (Triple.isAArch64() && Triple.isAndroid()) {
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("max-page-size=4096");
+ }
+
if (GCCInstallation.getParentLibPath().find("opt/rh/devtoolset") !=
StringRef::npos)
// With devtoolset on RHEL, we want to add a bin directory that is relative
@@ -263,15 +280,18 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// and the MIPS ABI require .dynsym to be sorted in different ways.
// .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
// ABI requires a mapping between the GOT and the symbol table.
- // Android loader does not support .gnu.hash.
+ // Android loader does not support .gnu.hash until API 23.
// Hexagon linker/loader does not support .gnu.hash
- if (!IsMips && !IsAndroid && !IsHexagon) {
+ if (!IsMips && !IsHexagon) {
if (Distro.IsRedhat() || Distro.IsOpenSUSE() || Distro.IsAlpineLinux() ||
- (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick))
+ (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick) ||
+ (IsAndroid && !Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=gnu");
- if (Distro.IsDebian() || Distro.IsOpenSUSE() || Distro == Distro::UbuntuLucid ||
- Distro == Distro::UbuntuJaunty || Distro == Distro::UbuntuKarmic)
+ if (Distro.IsDebian() || Distro.IsOpenSUSE() ||
+ Distro == Distro::UbuntuLucid || Distro == Distro::UbuntuJaunty ||
+ Distro == Distro::UbuntuKarmic ||
+ (IsAndroid && Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=both");
}
@@ -299,16 +319,14 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
if (GCCInstallation.isValid()) {
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath = GCCInstallation.getParentLibPath();
- const Multilib &Multilib = GCCInstallation.getMultilib();
- const MultilibSet &Multilibs = GCCInstallation.getMultilibs();
// Add toolchain / multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, Multilib,
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
GCCInstallation.getInstallPath(), Paths);
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
- addPathIfExists(D, GCCInstallation.getInstallPath() + Multilib.gccSuffix(),
+ addPathIfExists(D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
Paths);
// GCC cross compiling toolchains will install target libraries which ship
@@ -330,7 +348,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// Note that this matches the GCC behavior. See the below comment for where
// Clang diverges from GCC's behavior.
addPathIfExists(D, LibPath + "/../" + GCCTriple.str() + "/lib/../" +
- OSLibDir + Multilib.osSuffix(),
+ OSLibDir + SelectedMultilib.osSuffix(),
Paths);
// If the GCC installation we found is inside of the sysroot, we want to
@@ -433,6 +451,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/lib", Paths);
}
+ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
+ if (getTriple().isAndroid())
+ return ToolChain::CST_Libcxx;
+ return ToolChain::CST_Libstdcxx;
+}
+
bool Linux::HasNativeLLVMSupport() const { return true; }
Tool *Linux::buildLinker() const { return new tools::gnutools::Linker(*this); }
@@ -695,12 +719,28 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
const StringRef MIPSELMultiarchIncludeDirs[] = {
"/usr/include/mipsel-linux-gnu"};
const StringRef MIPS64MultiarchIncludeDirs[] = {
- "/usr/include/mips64-linux-gnu", "/usr/include/mips64-linux-gnuabi64"};
+ "/usr/include/mips64-linux-gnuabi64"};
const StringRef MIPS64ELMultiarchIncludeDirs[] = {
- "/usr/include/mips64el-linux-gnu",
"/usr/include/mips64el-linux-gnuabi64"};
+ const StringRef MIPSN32MultiarchIncludeDirs[] = {
+ "/usr/include/mips64-linux-gnuabin32"};
+ const StringRef MIPSN32ELMultiarchIncludeDirs[] = {
+ "/usr/include/mips64el-linux-gnuabin32"};
+ const StringRef MIPSR6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa32-linux-gnu"};
+ const StringRef MIPSR6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa32r6el-linux-gnu"};
+ const StringRef MIPS64R6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6-linux-gnuabi64"};
+ const StringRef MIPS64R6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6el-linux-gnuabi64"};
+ const StringRef MIPSN32R6MultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6-linux-gnuabin32"};
+ const StringRef MIPSN32R6ELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsisa64r6el-linux-gnuabin32"};
const StringRef PPCMultiarchIncludeDirs[] = {
- "/usr/include/powerpc-linux-gnu"};
+ "/usr/include/powerpc-linux-gnu",
+ "/usr/include/powerpc-linux-gnuspe"};
const StringRef PPC64MultiarchIncludeDirs[] = {
"/usr/include/powerpc64-linux-gnu"};
const StringRef PPC64LEMultiarchIncludeDirs[] = {
@@ -738,16 +778,38 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
break;
case llvm::Triple::mips:
- MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ MultiarchIncludeDirs = MIPSR6MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
break;
case llvm::Triple::mipsel:
- MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ MultiarchIncludeDirs = MIPSR6ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
break;
case llvm::Triple::mips64:
- MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32R6MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64R6MultiarchIncludeDirs;
+ else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32MultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
break;
case llvm::Triple::mips64el:
- MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
+ if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
+ if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32R6ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64R6ELMultiarchIncludeDirs;
+ else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
+ MultiarchIncludeDirs = MIPSN32ELMultiarchIncludeDirs;
+ else
+ MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
break;
case llvm::Triple::ppc:
MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
@@ -876,6 +938,9 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// Freescale SDK C++ headers are directly in <sysroot>/usr/include/c++,
// without a subdirectory corresponding to the gcc version.
LibDir.str() + "/../include/c++",
+ // Cray's gcc installation puts headers under "g++" without a
+ // version suffix.
+ LibDir.str() + "/../include/g++",
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
@@ -907,6 +972,12 @@ bool Linux::isPIEDefault() const {
getTriple().isMusl() || getSanitizerArgs().requiresPIE();
}
+bool Linux::IsMathErrnoDefault() const {
+ if (getTriple().isAndroid())
+ return false;
+ return Generic_ELF::IsMathErrnoDefault();
+}
+
SanitizerMask Linux::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
@@ -934,6 +1005,8 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
Res |= SanitizerKind::Thread;
+ if (IsX86_64)
+ Res |= SanitizerKind::KernelMemory;
if (IsX86_64 || IsMIPS64)
Res |= SanitizerKind::Efficiency;
if (IsX86 || IsX86_64)
@@ -954,7 +1027,7 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
// Add linker option -u__llvm_runtime_variable to cause runtime
// initialization module to be linked in.
- if (!Args.hasArg(options::OPT_coverage))
+ if ((!Args.hasArg(options::OPT_coverage)) && (!Args.hasArg(options::OPT_ftest_coverage)))
CmdArgs.push_back(Args.MakeArgString(
Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
ToolChain::addProfileRTLibs(Args, CmdArgs);
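The reworked mips64/mips64el cases above boil down to a naming scheme; this sketch (not from the patch) spells out the multiarch triple directory name for the ISA/ABI/endianness combinations involved:

#include <iostream>
#include <string>

static std::string mips64MultiarchTriple(bool IsR6, bool IsN32, bool IsEL) {
  std::string Arch =
      std::string(IsR6 ? "mipsisa64r6" : "mips64") + (IsEL ? "el" : "");
  std::string ABI = IsN32 ? "gnuabin32" : "gnuabi64";
  return Arch + "-linux-" + ABI;
}

int main() {
  std::cout << mips64MultiarchTriple(false, false, false) << "\n" // mips64-linux-gnuabi64
            << mips64MultiarchTriple(true, false, true) << "\n"   // mipsisa64r6el-linux-gnuabi64
            << mips64MultiarchTriple(true, true, true) << "\n";   // mipsisa64r6el-linux-gnuabin32
}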
diff --git a/lib/Driver/ToolChains/Linux.h b/lib/Driver/ToolChains/Linux.h
index 22dbbecf6b96..4a662cb4b427 100644
--- a/lib/Driver/ToolChains/Linux.h
+++ b/lib/Driver/ToolChains/Linux.h
@@ -37,7 +37,9 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetDefaultCXXStdlibType() const override;
bool isPIEDefault() const override;
+ bool IsMathErrnoDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/ToolChains/MSP430.cpp b/lib/Driver/ToolChains/MSP430.cpp
new file mode 100644
index 000000000000..b2ff88dbd021
--- /dev/null
+++ b/lib/Driver/ToolChains/MSP430.cpp
@@ -0,0 +1,233 @@
+//===--- MSP430.cpp - MSP430 Helpers for Tools ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MSP430.h"
+#include "CommonArgs.h"
+#include "Gnu.h"
+#include "InputInfo.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Multilib.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+static bool isSupportedMCU(const StringRef MCU) {
+ return llvm::StringSwitch<bool>(MCU)
+#define MSP430_MCU(NAME) .Case(NAME, true)
+#include "clang/Basic/MSP430Target.def"
+ .Default(false);
+}
+
+static StringRef getSupportedHWMult(const Arg *MCU) {
+ if (!MCU)
+ return "none";
+
+ return llvm::StringSwitch<StringRef>(MCU->getValue())
+#define MSP430_MCU_FEAT(NAME, HWMULT) .Case(NAME, HWMULT)
+#include "clang/Basic/MSP430Target.def"
+ .Default("none");
+}
+
+static StringRef getHWMultLib(const ArgList &Args) {
+ StringRef HWMult = Args.getLastArgValue(options::OPT_mhwmult_EQ, "auto");
+ if (HWMult == "auto") {
+ HWMult = getSupportedHWMult(Args.getLastArg(options::OPT_mmcu_EQ));
+ }
+
+ return llvm::StringSwitch<StringRef>(HWMult)
+ .Case("16bit", "-lmul_16")
+ .Case("32bit", "-lmul_32")
+ .Case("f5series", "-lmul_f5")
+ .Default("-lmul_none");
+}
+
+void msp430::getMSP430TargetFeatures(const Driver &D, const ArgList &Args,
+ std::vector<StringRef> &Features) {
+ const Arg *MCU = Args.getLastArg(options::OPT_mmcu_EQ);
+ if (MCU && !isSupportedMCU(MCU->getValue())) {
+ D.Diag(diag::err_drv_clang_unsupported) << MCU->getValue();
+ return;
+ }
+
+ const Arg *HWMultArg = Args.getLastArg(options::OPT_mhwmult_EQ);
+ if (!MCU && !HWMultArg)
+ return;
+
+ StringRef HWMult = HWMultArg ? HWMultArg->getValue() : "auto";
+ StringRef SupportedHWMult = getSupportedHWMult(MCU);
+
+ if (HWMult == "auto") {
+ // 'auto' - deduce hw multiplier support based on mcu name provided.
+ // If no mcu name is provided, assume no hw multiplier is supported.
+ if (!MCU)
+ D.Diag(clang::diag::warn_drv_msp430_hwmult_no_device);
+ HWMult = SupportedHWMult;
+ }
+
+ if (HWMult == "none") {
+ // 'none' - disable hw multiplier.
+ Features.push_back("-hwmult16");
+ Features.push_back("-hwmult32");
+ Features.push_back("-hwmultf5");
+ return;
+ }
+
+ if (MCU && SupportedHWMult == "none")
+ D.Diag(clang::diag::warn_drv_msp430_hwmult_unsupported) << HWMult;
+ if (MCU && HWMult != SupportedHWMult)
+ D.Diag(clang::diag::warn_drv_msp430_hwmult_mismatch)
+ << SupportedHWMult << HWMult;
+
+ if (HWMult == "16bit") {
+ // '16bit' - for 16-bit only hw multiplier.
+ Features.push_back("+hwmult16");
+ } else if (HWMult == "32bit") {
+ // '32bit' - for 16/32-bit hw multiplier.
+ Features.push_back("+hwmult32");
+ } else if (HWMult == "f5series") {
+ // 'f5series' - for 16/32-bit hw multiplier supported by F5 series mcus.
+ Features.push_back("+hwmultf5");
+ } else {
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << HWMultArg->getAsString(Args) << HWMult;
+ }
+}
+
+/// MSP430 Toolchain
+MSP430ToolChain::MSP430ToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ StringRef MultilibSuf = "";
+
+ GCCInstallation.init(Triple, Args);
+ if (GCCInstallation.isValid()) {
+ MultilibSuf = GCCInstallation.getMultilib().gccSuffix();
+
+ SmallString<128> GCCBinPath;
+ llvm::sys::path::append(GCCBinPath,
+ GCCInstallation.getParentLibPath(), "..", "bin");
+ addPathIfExists(D, GCCBinPath, getProgramPaths());
+
+ SmallString<128> GCCRtPath;
+ llvm::sys::path::append(GCCRtPath,
+ GCCInstallation.getInstallPath(), MultilibSuf);
+ addPathIfExists(D, GCCRtPath, getFilePaths());
+ }
+
+ SmallString<128> SysRootDir(computeSysRoot());
+ llvm::sys::path::append(SysRootDir, "lib", MultilibSuf);
+ addPathIfExists(D, SysRootDir, getFilePaths());
+}
+
+std::string MSP430ToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ SmallString<128> Dir;
+ if (GCCInstallation.isValid())
+ llvm::sys::path::append(Dir, GCCInstallation.getParentLibPath(), "..",
+ GCCInstallation.getTriple().str());
+ else
+ llvm::sys::path::append(Dir, getDriver().Dir, "..", getTriple().str());
+
+ return Dir.str();
+}
+
+void MSP430ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ SmallString<128> Dir(computeSysRoot());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+}
+
+void MSP430ToolChain::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+
+ const auto *MCUArg = DriverArgs.getLastArg(options::OPT_mmcu_EQ);
+ if (!MCUArg)
+ return;
+
+ const StringRef MCU = MCUArg->getValue();
+ if (MCU.startswith("msp430i")) {
+ // 'i' should be in lower case as it's defined in TI MSP430-GCC headers
+ CC1Args.push_back(DriverArgs.MakeArgString(
+ "-D__MSP430i" + MCU.drop_front(7).upper() + "__"));
+ } else {
+ CC1Args.push_back(DriverArgs.MakeArgString("-D__" + MCU.upper() + "__"));
+ }
+}
+
+Tool *MSP430ToolChain::buildLinker() const {
+ return new tools::msp430::Linker(*this);
+}
+
+void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+ std::string Linker = ToolChain.GetProgramPath(getShortName());
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_T)) {
+ if (const Arg *MCUArg = Args.getLastArg(options::OPT_mmcu_EQ))
+ CmdArgs.push_back(
+ Args.MakeArgString("-T" + StringRef(MCUArg->getValue()) + ".ld"));
+ } else {
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
+ }
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back(Args.MakeArgString(getHWMultLib(Args)));
+ CmdArgs.push_back("-lgcc");
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lcrt");
+ CmdArgs.push_back("-lnosys");
+ }
+ CmdArgs.push_back("--end-group");
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker),
+ CmdArgs, Inputs));
+}
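To make the predefine logic in MSP430ToolChain::addClangTargetOptions above concrete, here is a small standalone sketch (the MCU strings are just examples) of the macro spelling it produces; note the lower-case 'i' is preserved while the rest is upper-cased:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

static std::string upper(std::string S) {
  std::transform(S.begin(), S.end(), S.begin(),
                 [](unsigned char C) { return std::toupper(C); });
  return S;
}

// Mirrors the -D__...__ spelling chosen above for -mmcu= values.
static std::string mcuDefine(const std::string &MCU) {
  if (MCU.rfind("msp430i", 0) == 0) // msp430i* keeps the lower-case 'i'
    return "-D__MSP430i" + upper(MCU.substr(7)) + "__";
  return "-D__" + upper(MCU) + "__";
}

int main() {
  std::cout << mcuDefine("msp430i2041") << "\n"  // -D__MSP430i2041__
            << mcuDefine("msp430f5529") << "\n"; // -D__MSP430F5529__
}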
diff --git a/lib/Driver/ToolChains/MSP430.h b/lib/Driver/ToolChains/MSP430.h
new file mode 100644
index 000000000000..0fdceb75b963
--- /dev/null
+++ b/lib/Driver/ToolChains/MSP430.h
@@ -0,0 +1,71 @@
+//===--- MSP430.h - MSP430-specific Tool Helpers ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSP430_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSP430_H
+
+#include "Gnu.h"
+#include "InputInfo.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY MSP430ToolChain : public Generic_ELF {
+public:
+ MSP430ToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const override;
+
+protected:
+ Tool *buildLinker() const override;
+
+private:
+ std::string computeSysRoot() const;
+};
+
+} // end namespace toolchains
+
+namespace tools {
+namespace msp430 {
+
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC)
+ : GnuTool("MSP430::Linker", "msp430-elf-ld", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+void getMSP430TargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+} // end namespace msp430
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSP430_H
diff --git a/lib/Driver/ToolChains/MSVC.cpp b/lib/Driver/ToolChains/MSVC.cpp
index d062c6abc955..7e34b0df5c8c 100644
--- a/lib/Driver/ToolChains/MSVC.cpp
+++ b/lib/Driver/ToolChains/MSVC.cpp
@@ -355,6 +355,15 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT__SLASH_Zd))
CmdArgs.push_back("-debug");
+ // Pass on /Brepro if it was passed to the compiler.
+ // Note that /Brepro maps to -mno-incremental-linker-compatible.
+ bool DefaultIncrementalLinkerCompatible =
+ C.getDefaultToolChain().getTriple().isWindowsMSVCEnvironment();
+ if (!Args.hasFlag(options::OPT_mincremental_linker_compatible,
+ options::OPT_mno_incremental_linker_compatible,
+ DefaultIncrementalLinkerCompatible))
+ CmdArgs.push_back("-Brepro");
+
bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd,
options::OPT_shared);
if (DLL) {
@@ -365,6 +374,17 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::string("-implib:") + ImplibName));
}
+ if (TC.getSanitizerArgs().needsFuzzer()) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-wholearchive:") +
+ TC.getCompilerRTArgString(Args, "fuzzer", false)));
+ CmdArgs.push_back(Args.MakeArgString("-debug"));
+ // Prevent the linker from padding sections we use for instrumentation
+ // arrays.
+ CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
+ }
+
if (TC.getSanitizerArgs().needsAsanRt()) {
CmdArgs.push_back(Args.MakeArgString("-debug"));
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
@@ -702,15 +722,15 @@ bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
}
bool MSVCToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
- // Emit unwind tables by default on Win64. All non-x86_32 Windows platforms
- // such as ARM and PPC actually require unwind tables, but LLVM doesn't know
- // how to generate them yet.
-
// Don't emit unwind tables by default for MachO targets.
if (getTriple().isOSBinFormatMachO())
return false;
- return getArch() == llvm::Triple::x86_64;
+ // All non-x86_32 Windows targets require unwind tables. However, LLVM
+ // doesn't know how to generate them for all targets, so only enable
+ // the ones that are actually implemented.
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
bool MSVCToolChain::isPICDefault() const {
@@ -1266,7 +1286,7 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
if (MSVT.empty() &&
Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC)) {
- // -fms-compatibility-version=19.11 is default, aka 2017
+ // -fms-compatibility-version=19.11 is default, aka 2017, 15.3
MSVT = VersionTuple(19, 11);
}
return MSVT;
@@ -1298,6 +1318,8 @@ MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
SanitizerMask MSVCToolChain::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
Res &= ~SanitizerKind::CFIMFCall;
return Res;
}
@@ -1356,6 +1378,7 @@ static void TranslateOptArg(Arg *A, llvm::opt::DerivedArgList &DAL,
}
break;
case 'g':
+ A->claim();
break;
case 'i':
if (I + 1 != E && OptStr[I + 1] == '-') {
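One note on the /Brepro hunk above: clang-cl expresses /Brepro internally as -mno-incremental-linker-compatible, and the linker side of that mapping is shown schematically below (a sketch, not driver code):

#include <iostream>

// Incremental-linker compatibility is the MSVC-environment default; /Brepro
// turns it off, which is what asks the linker for reproducible output.
static const char *linkerReproFlag(bool IncrementalLinkerCompatible) {
  return IncrementalLinkerCompatible ? "" : "-Brepro";
}

int main() {
  std::cout << "default:  \"" << linkerReproFlag(true) << "\"\n"
            << "/Brepro:  \"" << linkerReproFlag(false) << "\"\n";
}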
diff --git a/lib/Driver/ToolChains/MSVC.h b/lib/Driver/ToolChains/MSVC.h
index 1db589ec9706..ebca0018bb85 100644
--- a/lib/Driver/ToolChains/MSVC.h
+++ b/lib/Driver/ToolChains/MSVC.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
#include "Cuda.h"
+#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -78,6 +79,18 @@ public:
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
+ /// Set CodeView as the default debug info format. Users can use -gcodeview
+ /// and -gdwarf to override the default.
+ codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
+ return codegenoptions::DIF_CodeView;
+ }
+
+ /// Set the debugger tuning to "default", since we're definitely not tuning
+ /// for GDB.
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::Default;
+ }
+
enum class SubDirectoryType {
Bin,
Include,
diff --git a/lib/Driver/ToolChains/MinGW.cpp b/lib/Driver/ToolChains/MinGW.cpp
index a88e00f0c8e8..2d5217d03d3a 100644
--- a/lib/Driver/ToolChains/MinGW.cpp
+++ b/lib/Driver/ToolChains/MinGW.cpp
@@ -10,10 +10,12 @@
#include "MinGW.h"
#include "InputInfo.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -52,7 +54,7 @@ void tools::MinGW::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_gsplit_dwarf))
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
- SplitDebugName(Args, Inputs[0]));
+ SplitDebugName(Args, Output));
}
void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
@@ -95,7 +97,7 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const ToolChain &TC = getToolChain();
const Driver &D = TC.getDriver();
- // const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+ const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
ArgStringList CmdArgs;
@@ -187,8 +189,6 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddFilePathLibArgs(Args, CmdArgs);
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
- // TODO: Add ASan stuff here
-
// TODO: Add profile stuff here
if (TC.ShouldLinkCXXStdlib(Args)) {
@@ -220,8 +220,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lssp_nonshared");
CmdArgs.push_back("-lssp");
}
- if (Args.hasArg(options::OPT_fopenmp))
- CmdArgs.push_back("-lgomp");
+
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ switch (TC.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case Driver::OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5md");
+ break;
+ case Driver::OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+ break;
+ case Driver::OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
AddLibGCC(Args, CmdArgs);
@@ -231,6 +247,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
+ if (Sanitize.needsAsanRt()) {
+ // MinGW always links against a shared MSVCRT.
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic", true));
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic_runtime_thunk"));
+ CmdArgs.push_back(Args.MakeArgString("--require-defined"));
+ CmdArgs.push_back(Args.MakeArgString(TC.getArch() == llvm::Triple::x86
+ ? "___asan_seh_interceptor"
+ : "__asan_seh_interceptor"));
+ // Make sure the linker considers all object files from the dynamic
+ // runtime thunk.
+ CmdArgs.push_back(Args.MakeArgString("--whole-archive"));
+ CmdArgs.push_back(Args.MakeArgString(
+ TC.getCompilerRT(Args, "asan_dynamic_runtime_thunk")));
+ CmdArgs.push_back(Args.MakeArgString("--no-whole-archive"));
+ }
+
if (!HasWindowsApp) {
// Add system libraries. If linking to libwindowsapp.a, that import
// library replaces all these and we shouldn't accidentally try to
@@ -359,6 +393,10 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(Base + "lib");
// openSUSE
getFilePaths().push_back(Base + Arch + "/sys-root/mingw/lib");
+
+ NativeLLVMSupport =
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
+ .equals_lower("lld");
}
bool toolchains::MinGW::IsIntegratedAssemblerDefault() const { return true; }
@@ -386,7 +424,17 @@ Tool *toolchains::MinGW::buildLinker() const {
return new tools::MinGW::Linker(*this);
}
+bool toolchains::MinGW::HasNativeLLVMSupport() const {
+ return NativeLLVMSupport;
+}
+
bool toolchains::MinGW::IsUnwindTablesDefault(const ArgList &Args) const {
+ Arg *ExceptionArg = Args.getLastArg(options::OPT_fsjlj_exceptions,
+ options::OPT_fseh_exceptions,
+ options::OPT_fdwarf_exceptions);
+ if (ExceptionArg &&
+ ExceptionArg->getOption().matches(options::OPT_fseh_exceptions))
+ return true;
return getArch() == llvm::Triple::x86_64;
}
@@ -407,6 +455,12 @@ toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
return llvm::ExceptionHandling::DwarfCFI;
}
+SanitizerMask toolchains::MinGW::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ return Res;
+}
+
void toolchains::MinGW::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
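The --require-defined symbol chosen in the new ASan hunk above differs only by an extra leading underscore on 32-bit x86, where MinGW C symbols are decorated; a tiny sketch of that naming (an assumption about symbol decoration, not part of the patch):

#include <iostream>
#include <string>

static std::string sehInterceptorSymbol(bool IsX86_32) {
  // i386 MinGW prepends '_' to C symbols; x86_64 does not.
  return std::string(IsX86_32 ? "_" : "") + "__asan_seh_interceptor";
}

int main() {
  std::cout << sehInterceptorSymbol(true) << "\n"   // ___asan_seh_interceptor
            << sehInterceptorSymbol(false) << "\n"; // __asan_seh_interceptor
}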
diff --git a/lib/Driver/ToolChains/MinGW.h b/lib/Driver/ToolChains/MinGW.h
index 0c3919d29f77..04d23006ee75 100644
--- a/lib/Driver/ToolChains/MinGW.h
+++ b/lib/Driver/ToolChains/MinGW.h
@@ -59,12 +59,16 @@ public:
MinGW(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ bool HasNativeLLVMSupport() const override;
+
bool IsIntegratedAssemblerDefault() const override;
bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
+ SanitizerMask getSupportedSanitizers() const override;
+
llvm::ExceptionHandling GetExceptionModel(
const llvm::opt::ArgList &Args) const override;
@@ -97,6 +101,8 @@ private:
void findGccLibDir();
llvm::ErrorOr<std::string> findGcc();
llvm::ErrorOr<std::string> findClangRelativeSysroot();
+
+ bool NativeLLVMSupport;
};
} // end namespace toolchains
diff --git a/lib/Driver/ToolChains/Minix.cpp b/lib/Driver/ToolChains/Minix.cpp
index 39e6f90b6ef0..7fadcb129d46 100644
--- a/lib/Driver/ToolChains/Minix.cpp
+++ b/lib/Driver/ToolChains/Minix.cpp
@@ -8,13 +8,13 @@
//===----------------------------------------------------------------------===//
#include "Minix.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
-#include "clang/Basic/VirtualFileSystem.h"
+#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang;
diff --git a/lib/Driver/ToolChains/MipsLinux.h b/lib/Driver/ToolChains/MipsLinux.h
index d4b476d883e6..edf58a62b95c 100644
--- a/lib/Driver/ToolChains/MipsLinux.h
+++ b/lib/Driver/ToolChains/MipsLinux.h
@@ -49,7 +49,7 @@ public:
}
const char *getDefaultLinker() const override {
- return "lld";
+ return "ld.lld";
}
private:
diff --git a/lib/Driver/ToolChains/NetBSD.cpp b/lib/Driver/ToolChains/NetBSD.cpp
index 02caafda1657..b1321cacaf7a 100644
--- a/lib/Driver/ToolChains/NetBSD.cpp
+++ b/lib/Driver/ToolChains/NetBSD.cpp
@@ -164,7 +164,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- arm::appendEBLinkFlags(Args, CmdArgs, ToolChain.getEffectiveTriple());
+ arm::appendBE8LinkFlag(Args, CmdArgs, ToolChain.getEffectiveTriple());
CmdArgs.push_back("-m");
switch (ToolChain.getTriple().getEnvironment()) {
case llvm::Triple::EABI:
@@ -448,12 +448,24 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Vptr;
}
if (IsX86_64) {
+ Res |= SanitizerKind::DataFlow;
Res |= SanitizerKind::Efficiency;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::HWAddress;
Res |= SanitizerKind::KernelAddress;
+ Res |= SanitizerKind::KernelHWAddress;
+ Res |= SanitizerKind::KernelMemory;
Res |= SanitizerKind::Memory;
Res |= SanitizerKind::Thread;
}
return Res;
}
+
+void NetBSD::addClangTargetOptions(const ArgList &,
+ ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ const SanitizerArgs &SanArgs = getSanitizerArgs();
+ if (SanArgs.hasAnySanitizer())
+ CC1Args.push_back("-D_REENTRANT");
+}
diff --git a/lib/Driver/ToolChains/NetBSD.h b/lib/Driver/ToolChains/NetBSD.h
index 49e3a58d02c3..ae0865fd6573 100644
--- a/lib/Driver/ToolChains/NetBSD.h
+++ b/lib/Driver/ToolChains/NetBSD.h
@@ -76,6 +76,10 @@ public:
SanitizerMask getSupportedSanitizers() const override;
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/lib/Driver/ToolChains/OpenBSD.cpp b/lib/Driver/ToolChains/OpenBSD.cpp
index 7b98cd62bbfc..3d35d37b7db3 100644
--- a/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/lib/Driver/ToolChains/OpenBSD.cpp
@@ -111,9 +111,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
- if (getToolChain().getArch() == llvm::Triple::mips64)
+ if (ToolChain.getArch() == llvm::Triple::mips64)
CmdArgs.push_back("-EB");
- else if (getToolChain().getArch() == llvm::Triple::mips64el)
+ else if (ToolChain.getArch() == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
@@ -138,7 +138,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_pie))
CmdArgs.push_back("-pie");
- if (Args.hasArg(options::OPT_nopie))
+ if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-nopie");
if (Output.isFilename()) {
@@ -149,44 +149,40 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crt0 = nullptr;
+ const char *crtbegin = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("gcrt0.o")));
+ crt0 = "gcrt0.o";
else if (Args.hasArg(options::OPT_static) &&
!Args.hasArg(options::OPT_nopie))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("rcrt0.o")));
+ crt0 = "rcrt0.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crt0.o")));
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ crt0 = "crt0.o";
+ crtbegin = "crtbegin.o";
} else {
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ crtbegin = "crtbeginS.o";
}
- }
- std::string Triple = getToolChain().getTripleString();
- if (Triple.substr(0, 6) == "x86_64")
- Triple.replace(0, 6, "amd64");
- CmdArgs.push_back(
- Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
- CmdArgs.push_back(Args.MakeArgString("-L/usr/lib"));
+ if (crt0)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt0)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e,
+ options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-lm_p");
else
@@ -202,7 +198,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
if (Args.hasArg(options::OPT_pthread)) {
if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
@@ -218,21 +214,22 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ const char *crtend = nullptr;
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ crtend = "crtend.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ crtend = "crtendS.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
}
const char *Exec = Args.MakeArgString(
- !NeedsSanitizerDeps ? getToolChain().GetLinkerPath()
- : getToolChain().GetProgramPath("ld.lld"));
+ !NeedsSanitizerDeps ? ToolChain.GetLinkerPath()
+ : ToolChain.GetProgramPath("ld.lld"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -257,8 +254,7 @@ SanitizerMask OpenBSD::getSupportedSanitizers() const {
OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
- getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
diff --git a/lib/Driver/ToolChains/OpenBSD.h b/lib/Driver/ToolChains/OpenBSD.h
index bf8dfa4653cb..1912abdb95bc 100644
--- a/lib/Driver/ToolChains/OpenBSD.h
+++ b/lib/Driver/ToolChains/OpenBSD.h
@@ -58,6 +58,14 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
bool isPIEDefault() const override { return true; }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
diff --git a/lib/Driver/ToolChains/PS4CPU.cpp b/lib/Driver/ToolChains/PS4CPU.cpp
index a4b74d492331..0708d25fe45c 100644
--- a/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/lib/Driver/ToolChains/PS4CPU.cpp
@@ -121,7 +121,8 @@ static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
assert(Output.isNothing() && "Invalid output.");
}
- AddPS4SanitizerArgs(ToolChain, CmdArgs);
+ if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
@@ -190,7 +191,8 @@ static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
assert(Output.isNothing() && "Invalid output.");
}
- AddPS4SanitizerArgs(ToolChain, CmdArgs);
+ if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ AddPS4SanitizerArgs(ToolChain, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
const char *crt1 = nullptr;
diff --git a/lib/Driver/ToolChains/RISCV.cpp b/lib/Driver/ToolChains/RISCVToolchain.cpp
index 31996fc588f1..e787c82b28a8 100644
--- a/lib/Driver/ToolChains/RISCV.cpp
+++ b/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -1,4 +1,4 @@
-//===--- RISCV.cpp - RISCV ToolChain Implementations ------------*- C++ -*-===//
+//===--- RISCVToolchain.cpp - RISCV ToolChain Implementations ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,13 @@
//
//===----------------------------------------------------------------------===//
-#include "RISCV.h"
+#include "RISCVToolchain.h"
#include "CommonArgs.h"
#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -27,7 +28,7 @@ RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
- getFilePaths().push_back(D.SysRoot + "/lib");
+ getFilePaths().push_back(computeSysRoot() + "/lib");
if (GCCInstallation.isValid()) {
getFilePaths().push_back(GCCInstallation.getInstallPath().str());
getProgramPaths().push_back(
@@ -39,13 +40,21 @@ Tool *RISCVToolChain::buildLinker() const {
return new tools::RISCV::Linker(*this);
}
+void RISCVToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+ CC1Args.push_back("-fuse-init-array");
+}
+
void RISCVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc))
return;
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
- SmallString<128> Dir(getDriver().SysRoot);
+ SmallString<128> Dir(computeSysRoot());
llvm::sys::path::append(Dir, "include");
addSystemInclude(DriverArgs, CC1Args, Dir.str());
}
@@ -54,15 +63,30 @@ void RISCVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
void RISCVToolChain::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- StringRef LibDir = GCCInstallation.getParentLibPath();
const GCCVersion &Version = GCCInstallation.getVersion();
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
- addLibStdCXXIncludePaths(
- LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
+ addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text,
"", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
}
+std::string RISCVToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ if (!GCCInstallation.isValid())
+ return std::string();
+
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ std::string SysRootDir = LibDir.str() + "/../" + TripleStr.str();
+
+ if (!llvm::sys::fs::exists(SysRootDir))
+ return std::string();
+
+ return SysRootDir;
+}
+
void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
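
The sysroot fallback added above is small enough to restate outside the driver. The following is a minimal sketch, under the assumption that the GCC-installation state is passed in as plain parameters rather than read from `GCCInstallation`; it mirrors the precedence in `computeSysRoot` (an explicit `--sysroot` wins, otherwise a GCC-derived directory is used, and only if it exists):

```cpp
#include <filesystem>
#include <string>

// Minimal sketch (not driver code) of the sysroot selection order used by
// RISCVToolChain::computeSysRoot above. GCCInstallValid, GCCParentLibDir and
// GCCTriple stand in for the GCCInstallation state the real driver consults.
std::string computeSysRootSketch(const std::string &DriverSysRoot,
                                 bool GCCInstallValid,
                                 const std::string &GCCParentLibDir,
                                 const std::string &GCCTriple) {
  // 1. An explicit --sysroot always wins.
  if (!DriverSysRoot.empty())
    return DriverSysRoot;

  // 2. Otherwise derive <gcc-parent-lib-dir>/../<triple> from the GCC install.
  if (!GCCInstallValid)
    return std::string();
  std::string SysRootDir = GCCParentLibDir + "/../" + GCCTriple;

  // 3. Use the derived directory only if it actually exists on disk.
  if (!std::filesystem::exists(SysRootDir))
    return std::string();
  return SysRootDir;
}
```
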
diff --git a/lib/Driver/ToolChains/RISCV.h b/lib/Driver/ToolChains/RISCVToolchain.h
index 6f59d84020d8..4b38690b1b61 100644
--- a/lib/Driver/ToolChains/RISCV.h
+++ b/lib/Driver/ToolChains/RISCVToolchain.h
@@ -1,4 +1,4 @@
-//===--- RISCV.h - RISCV ToolChain Implementations --------------*- C++ -*-===//
+//===--- RISCVToolchain.h - RISCV ToolChain Implementations -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCV_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCV_H
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCVTOOLCHAIN_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCVTOOLCHAIN_H
#include "Gnu.h"
#include "clang/Driver/ToolChain.h"
@@ -23,6 +23,9 @@ public:
const llvm::opt::ArgList &Args);
bool IsIntegratedAssemblerDefault() const override { return true; }
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -32,6 +35,9 @@ public:
protected:
Tool *buildLinker() const override;
+
+private:
+ std::string computeSysRoot() const;
};
} // end namespace toolchains
@@ -54,4 +60,4 @@ public:
} // end namespace driver
} // end namespace clang
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCV_H
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_RISCVTOOLCHAIN_H
diff --git a/lib/Driver/ToolChains/Solaris.h b/lib/Driver/ToolChains/Solaris.h
index 9e14269b393e..4d9c828b5c6b 100644
--- a/lib/Driver/ToolChains/Solaris.h
+++ b/lib/Driver/ToolChains/Solaris.h
@@ -55,8 +55,6 @@ public:
Solaris(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- bool IsIntegratedAssemblerDefault() const override { return true; }
-
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/lib/Driver/ToolChains/WebAssembly.cpp b/lib/Driver/ToolChains/WebAssembly.cpp
index 94f7279bbdba..6310d5fabaec 100644
--- a/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/lib/Driver/ToolChains/WebAssembly.cpp
@@ -22,15 +22,20 @@ using namespace clang;
using namespace llvm::opt;
wasm::Linker::Linker(const ToolChain &TC)
- : GnuTool("wasm::Linker", "lld", TC) {}
-
-bool wasm::Linker::isLinkJob() const {
- return true;
+ : GnuTool("wasm::Linker", "lld", TC) {}
+
+/// Following the conventions in https://wiki.debian.org/Multiarch/Tuples,
+/// we remove the vendor field to form the multiarch triple.
+static std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) {
+ return (TargetTriple.getArchName() + "-" +
+ TargetTriple.getOSAndEnvironmentName()).str();
}
-bool wasm::Linker::hasIntegratedCPP() const {
- return false;
-}
+bool wasm::Linker::isLinkJob() const { return true; }
+
+bool wasm::Linker::hasIntegratedCPP() const { return false; }
void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
@@ -41,8 +46,6 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const ToolChain &ToolChain = getToolChain();
const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
ArgStringList CmdArgs;
- CmdArgs.push_back("-flavor");
- CmdArgs.push_back("wasm");
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("--strip-all");
@@ -75,13 +78,23 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args)
- : ToolChain(D, Triple, Args) {
+ : ToolChain(D, Triple, Args) {
assert(Triple.isArch32Bit() != Triple.isArch64Bit());
getProgramPaths().push_back(getDriver().getInstalledDir());
- getFilePaths().push_back(getDriver().SysRoot + "/lib");
+ if (getTriple().getOS() == llvm::Triple::UnknownOS) {
+ // Theoretically an "unknown" OS should mean no standard libraries, but it
+ // could also mean that a custom set of libraries is in use, so just add
+ // /lib to the search path. Disable multiarch in this case, to discourage
+ // paths containing "unknown" from acquiring meanings.
+ getFilePaths().push_back(getDriver().SysRoot + "/lib");
+ } else {
+ const std::string MultiarchTriple =
+ getMultiarchTriple(getDriver(), Triple, getDriver().SysRoot);
+ getFilePaths().push_back(getDriver().SysRoot + "/lib/" + MultiarchTriple);
+ }
}
bool WebAssembly::IsMathErrnoDefault() const { return false; }
@@ -117,7 +130,8 @@ ToolChain::RuntimeLibType WebAssembly::GetDefaultRuntimeLibType() const {
return ToolChain::RLT_CompilerRT;
}
-ToolChain::CXXStdlibType WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
+ToolChain::CXXStdlibType
+WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
StringRef Value = A->getValue();
if (Value != "libc++")
@@ -129,16 +143,29 @@ ToolChain::CXXStdlibType WebAssembly::GetCXXStdlibType(const ArgList &Args) cons
void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (!DriverArgs.hasArg(options::OPT_nostdinc))
+ if (!DriverArgs.hasArg(options::OPT_nostdinc)) {
+ if (getTriple().getOS() != llvm::Triple::UnknownOS) {
+ const std::string MultiarchTriple =
+ getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
+ addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include/" + MultiarchTriple);
+ }
addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
+ }
}
void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (!DriverArgs.hasArg(options::OPT_nostdlibinc) &&
- !DriverArgs.hasArg(options::OPT_nostdincxx))
+ !DriverArgs.hasArg(options::OPT_nostdincxx)) {
+ if (getTriple().getOS() != llvm::Triple::UnknownOS) {
+ const std::string MultiarchTriple =
+ getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/include/" + MultiarchTriple + "/c++/v1");
+ }
addSystemInclude(DriverArgs, CC1Args,
getDriver().SysRoot + "/include/c++/v1");
+ }
}
void WebAssembly::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
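
The multiarch triple introduced here is the target triple with its vendor component removed, following the Debian convention linked in the comment, so the library search path becomes, for example, `<sysroot>/lib/wasm32-wasi` for a `wasm32-unknown-wasi` target (example triple chosen for illustration). A standalone sketch of that string manipulation, without the `llvm::Triple` accessors:

```cpp
// Illustrative only: drop the vendor field of an <arch>-<vendor>-<os>[-<env>]
// triple to form a multiarch directory name, as getMultiarchTriple does above
// via llvm::Triple accessors.
#include <cassert>
#include <string>

std::string dropVendor(const std::string &Triple) {
  std::string::size_type FirstDash = Triple.find('-');
  std::string::size_type SecondDash = Triple.find('-', FirstDash + 1);
  if (FirstDash == std::string::npos || SecondDash == std::string::npos)
    return Triple; // Not enough components; leave unchanged.
  // Keep <arch>, skip <vendor>, keep the rest (<os>[-<env>]).
  return Triple.substr(0, FirstDash) + Triple.substr(SecondDash);
}

int main() {
  // Example inputs are assumptions for illustration.
  assert(dropVendor("wasm32-unknown-wasi") == "wasm32-wasi");
  assert(dropVendor("wasm64-unknown-emscripten") == "wasm64-emscripten");
}
```
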
diff --git a/lib/Driver/ToolChains/WebAssembly.h b/lib/Driver/ToolChains/WebAssembly.h
index cdbb34ff919f..d795bad90020 100644
--- a/lib/Driver/ToolChains/WebAssembly.h
+++ b/lib/Driver/ToolChains/WebAssembly.h
@@ -51,14 +51,15 @@ private:
bool hasBlocksRuntime() const override;
bool SupportsProfiling() const override;
bool HasNativeLLVMSupport() const override;
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
- void AddClangSystemIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -66,9 +67,7 @@ private:
llvm::opt::ArgStringList &CmdArgs) const override;
std::string getThreadModel() const override;
- const char *getDefaultLinker() const override {
- return "lld";
- }
+ const char *getDefaultLinker() const override { return "wasm-ld"; }
Tool *buildLinker() const override;
};
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index 45bb699cfb88..9d2737bbc719 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -312,9 +312,11 @@ ID types::lookupHeaderTypeForSourceType(ID Id) {
default:
return Id;
+ // FIXME: Handle preprocessed input types.
case types::TY_C:
return types::TY_CHeader;
case types::TY_CXX:
+ case types::TY_CXXModule:
return types::TY_CXXHeader;
case types::TY_ObjC:
return types::TY_ObjCHeader;
diff --git a/lib/Driver/XRayArgs.cpp b/lib/Driver/XRayArgs.cpp
index 30b0e72760c9..1a48493d7dc7 100644
--- a/lib/Driver/XRayArgs.cpp
+++ b/lib/Driver/XRayArgs.cpp
@@ -50,13 +50,23 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
- } else if (Triple.getOS() == llvm::Triple::FreeBSD ||
- Triple.getOS() == llvm::Triple::OpenBSD ||
- Triple.getOS() == llvm::Triple::NetBSD) {
+ } else if (Triple.isOSFreeBSD() ||
+ Triple.isOSOpenBSD() ||
+ Triple.isOSNetBSD() ||
+ Triple.getOS() == llvm::Triple::Darwin) {
if (Triple.getArch() != llvm::Triple::x86_64) {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
}
+ } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::aarch64:
+ break;
+ default:
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
} else {
D.Diag(diag::err_drv_clang_unsupported)
<< (std::string(XRayInstrumentOption) + " on " + Triple.str());
@@ -164,7 +174,7 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
}
// Then we want to sort and unique the modes we've collected.
- llvm::sort(Modes.begin(), Modes.end());
+ llvm::sort(Modes);
Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
}
}
@@ -214,4 +224,19 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
ModeOpt += Mode;
CmdArgs.push_back(Args.MakeArgString(ModeOpt));
}
+
+ SmallString<64> Bundle("-fxray-instrumentation-bundle=");
+ if (InstrumentationBundle.full()) {
+ Bundle += "all";
+ } else if (InstrumentationBundle.empty()) {
+ Bundle += "none";
+ } else {
+ if (InstrumentationBundle.has(XRayInstrKind::Function))
+ Bundle += "function";
+ if (InstrumentationBundle.has(XRayInstrKind::Custom))
+ Bundle += "custom";
+ if (InstrumentationBundle.has(XRayInstrKind::Typed))
+ Bundle += "typed";
+ }
+ CmdArgs.push_back(Args.MakeArgString(Bundle));
}
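
The tail of the XRayArgs.cpp hunk assembles the `-fxray-instrumentation-bundle=` argument from the instrumentation set. A compact sketch of the same mapping, with a plain bitmask standing in for `XRayInstrSet` (an assumption of this illustration); note that, as in the patch, multiple kinds are concatenated without a separator:

```cpp
// Sketch of mapping an instrumentation-kind bitmask to the
// -fxray-instrumentation-bundle= argument, mirroring the logic above.
// The Kind enum is a stand-in for XRayInstrKind.
#include <string>

enum Kind : unsigned { Function = 1u << 0, Custom = 1u << 1, Typed = 1u << 2 };
constexpr unsigned All = Function | Custom | Typed;

std::string makeBundleArg(unsigned Mask) {
  std::string Bundle = "-fxray-instrumentation-bundle=";
  if (Mask == All) {
    Bundle += "all";
  } else if (Mask == 0) {
    Bundle += "none";
  } else {
    if (Mask & Function)
      Bundle += "function";
    if (Mask & Custom)
      Bundle += "custom";
    if (Mask & Typed)
      Bundle += "typed";
  }
  return Bundle;
}
```
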
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index b53a70d87582..7c9ab170093f 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -420,8 +420,8 @@ static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
commit.replace(MsgRange, "@[]");
return true;
}
- SourceRange ArgRange(Msg->getArg(0)->getLocStart(),
- Msg->getArg(Msg->getNumArgs()-2)->getLocEnd());
+ SourceRange ArgRange(Msg->getArg(0)->getBeginLoc(),
+ Msg->getArg(Msg->getNumArgs() - 2)->getEndLoc());
commit.replaceWithInner(MsgRange, ArgRange);
commit.insertWrap("@[", ArgRange, "]");
return true;
@@ -550,8 +550,8 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
// Range of arguments up until and including the last key.
// The sentinel and first value are cut off, the value will move after the
// key.
- SourceRange ArgRange(Msg->getArg(1)->getLocStart(),
- Msg->getArg(SentinelIdx-1)->getLocEnd());
+ SourceRange ArgRange(Msg->getArg(1)->getBeginLoc(),
+ Msg->getArg(SentinelIdx - 1)->getEndLoc());
commit.insertWrap("@{", ArgRange, "}");
commit.replaceWithInner(MsgRange, ArgRange);
return true;
@@ -591,8 +591,7 @@ static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
}
// Range of arguments up until and including the last key.
// The first value is cut off, the value will move after the key.
- SourceRange ArgRange(Keys.front()->getLocStart(),
- Keys.back()->getLocEnd());
+ SourceRange ArgRange(Keys.front()->getBeginLoc(), Keys.back()->getEndLoc());
commit.insertWrap("@{", ArgRange, "}");
commit.replaceWithInner(MsgRange, ArgRange);
return true;
@@ -1079,13 +1078,16 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_NonAtomicToAtomic:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
return false;
case CK_BooleanToSignedIntegral:
llvm_unreachable("OpenCL-specific cast in Objective-C?");
+
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ llvm_unreachable("Fixed point types are disabled for Objective-C");
}
}
@@ -1131,7 +1133,7 @@ static bool doRewriteToUTF8StringBoxedExpressionHelper(
if (const StringLiteral *
StrE = dyn_cast<StringLiteral>(OrigArg->IgnoreParens())) {
commit.replaceWithInner(Msg->getSourceRange(), StrE->getSourceRange());
- commit.insert(StrE->getLocStart(), "@");
+ commit.insert(StrE->getBeginLoc(), "@");
return true;
}
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index fc2f891e0857..e6ce01b520b5 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -67,10 +67,11 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
unsigned ContentStartColumn,
unsigned ColumnLimit,
unsigned TabWidth,
- encoding::Encoding Encoding) {
- LLVM_DEBUG(llvm::dbgs() << "Comment split: \"" << Text << ", " << ColumnLimit
- << "\", Content start: " << ContentStartColumn
- << "\n");
+ encoding::Encoding Encoding,
+ const FormatStyle &Style) {
+ LLVM_DEBUG(llvm::dbgs() << "Comment split: \"" << Text
+ << "\", Column limit: " << ColumnLimit
+ << ", Content start: " << ContentStartColumn << "\n");
if (ColumnLimit <= ContentStartColumn + 1)
return BreakableToken::Split(StringRef::npos, 0);
@@ -89,12 +90,21 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
StringRef::size_type SpaceOffset = Text.find_last_of(Blanks, MaxSplitBytes);
- // Do not split before a number followed by a dot: this would be interpreted
- // as a numbered list, which would prevent re-flowing in subsequent passes.
static auto *const kNumberedListRegexp = new llvm::Regex("^[1-9][0-9]?\\.");
- if (SpaceOffset != StringRef::npos &&
- kNumberedListRegexp->match(Text.substr(SpaceOffset).ltrim(Blanks)))
- SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
+ while (SpaceOffset != StringRef::npos) {
+ // Do not split before a number followed by a dot: this would be interpreted
+ // as a numbered list, which would prevent re-flowing in subsequent passes.
+ if (kNumberedListRegexp->match(Text.substr(SpaceOffset).ltrim(Blanks)))
+ SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
+ // In JavaScript, some @tags can be followed by {, and machinery that parses
+ // these comments will fail to understand the comment if followed by a line
+ // break. So avoid ever breaking before a {.
+ else if (Style.Language == FormatStyle::LK_JavaScript &&
+ SpaceOffset + 1 < Text.size() && Text[SpaceOffset + 1] == '{')
+ SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
+ else
+ break;
+ }
if (SpaceOffset == StringRef::npos ||
// Don't break at leading whitespace.
@@ -109,6 +119,12 @@ static BreakableToken::Split getCommentSplit(StringRef Text,
Blanks, std::max<unsigned>(MaxSplitBytes, FirstNonWhitespace));
}
if (SpaceOffset != StringRef::npos && SpaceOffset != 0) {
+ // adaptStartOfLine will break after lines starting with /** if the comment
+ // is broken anywhere. Avoid emitting this break twice here.
+ // Example: /** longtextcomesherethatbreaks */ (with ColumnLimit 20) will
+ // insert a break after /**, so this code must not insert the same break.
+ if (SpaceOffset == 1 && Text[SpaceOffset - 1] == '*')
+ return BreakableToken::Split(StringRef::npos, 0);
StringRef BeforeCut = Text.substr(0, SpaceOffset).rtrim(Blanks);
StringRef AfterCut = Text.substr(SpaceOffset).ltrim(Blanks);
return BreakableToken::Split(BeforeCut.size(),
@@ -260,7 +276,7 @@ BreakableComment::getSplit(unsigned LineIndex, unsigned TailOffset,
return Split(StringRef::npos, 0);
return getCommentSplit(Content[LineIndex].substr(TailOffset),
ContentStartColumn, ColumnLimit, Style.TabWidth,
- Encoding);
+ Encoding, Style);
}
void BreakableComment::compressWhitespace(
@@ -620,6 +636,8 @@ void BreakableBlockComment::adaptStartOfLine(
if (DelimitersOnNewline) {
// Since we're breaking at index 1 below, the break position and the
// break length are the same.
+ // Note: this works because getCommentSplit is careful never to split at
+ // the beginning of a line.
size_t BreakLength = Lines[0].substr(1).find_first_not_of(Blanks);
if (BreakLength != StringRef::npos)
insertBreak(LineIndex, 0, Split(1, BreakLength), /*ContentIndent=*/0,
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index 7ca588a675b5..c369b94b9987 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -403,7 +403,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// }.bind(...));
// FIXME: We should find a more generic solution to this problem.
!(State.Column <= NewLineColumn &&
- Style.Language == FormatStyle::LK_JavaScript))
+ Style.Language == FormatStyle::LK_JavaScript) &&
+ !(Previous.closesScopeAfterBlock() &&
+ State.Column <= NewLineColumn))
return true;
// If the template declaration spans multiple lines, force wrap before the
@@ -700,7 +702,8 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// Indent relative to the RHS of the expression unless this is a simple
// assignment without binary expression on the RHS. Also indent relative to
// unary operators and the colons of constructor initializers.
- State.Stack.back().LastSpace = State.Column;
+ if (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None)
+ State.Stack.back().LastSpace = State.Column;
} else if (Previous.is(TT_InheritanceColon)) {
State.Stack.back().Indent = State.Column;
State.Stack.back().LastSpace = State.Column;
@@ -1132,7 +1135,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// }, a, b, c);
if (Current.isNot(tok::comment) && Previous &&
Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
- !Previous->is(TT_DictLiteral) && State.Stack.size() > 1) {
+ !Previous->is(TT_DictLiteral) && State.Stack.size() > 1 &&
+ !State.Stack.back().HasMultipleNestedBlocks) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].NoLineBreak = true;
@@ -1499,10 +1503,25 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// violate the rectangle rule and visually flows within the surrounding
// source.
bool ContentStartsOnNewline = Current.TokenText[OldPrefixSize] == '\n';
- unsigned NextStartColumn =
- ContentStartsOnNewline
- ? State.Stack.back().NestedBlockIndent + Style.IndentWidth
- : FirstStartColumn;
+ // If this token is the last parameter (checked by looking whether it is
+ // followed by `)`), base the indent off the line's nested block indent.
+ // Otherwise, base the indent off the arguments' indent, so we can achieve:
+ // fffffffffff(1, 2, 3, R"pb(
+ // key1: 1 #
+ // key2: 2)pb");
+ //
+ // fffffffffff(1, 2, 3,
+ // R"pb(
+ // key1: 1 #
+ // key2: 2
+ // )pb",
+ // 5);
+ unsigned CurrentIndent = (Current.Next && Current.Next->is(tok::r_paren))
+ ? State.Stack.back().NestedBlockIndent
+ : State.Stack.back().Indent;
+ unsigned NextStartColumn = ContentStartsOnNewline
+ ? CurrentIndent + Style.IndentWidth
+ : FirstStartColumn;
// The last start column is the column the raw string suffix starts if it is
// put on a newline.
@@ -1514,7 +1533,7 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
// indent.
unsigned LastStartColumn = Current.NewlinesBefore
? FirstStartColumn - NewPrefixSize
- : State.Stack.back().NestedBlockIndent;
+ : CurrentIndent;
std::pair<tooling::Replacements, unsigned> Fixes = internal::reformat(
RawStringStyle, RawText, {tooling::Range(0, RawText.size())},
@@ -1524,8 +1543,7 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
auto NewCode = applyAllReplacements(RawText, Fixes.first);
tooling::Replacements NoFixes;
if (!NewCode) {
- State.Column += Current.ColumnWidth;
- return 0;
+ return addMultilineToken(Current, State);
}
if (!DryRun) {
if (NewDelimiter != OldDelimiter) {
@@ -1574,6 +1592,13 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned PrefixExcessCharacters =
StartColumn + NewPrefixSize > Style.ColumnLimit ?
StartColumn + NewPrefixSize - Style.ColumnLimit : 0;
+ bool IsMultiline =
+ ContentStartsOnNewline || (NewCode->find('\n') != std::string::npos);
+ if (IsMultiline) {
+ // Break before further function parameters on all levels.
+ for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
+ State.Stack[i].BreakBeforeParameter = true;
+ }
return Fixes.second + PrefixExcessCharacters * Style.PenaltyExcessCharacter;
}
@@ -1840,7 +1865,8 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// No break opportunity - update the penalty and continue with the next
// logical line.
if (LineIndex < EndIndex - 1)
- // The last line's penalty is handled in addNextStateToQueue().
+ // The last line's penalty is handled in addNextStateToQueue() or when
+ // calling replaceWhitespaceAfterLastLine below.
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
LLVM_DEBUG(llvm::dbgs() << " No break opportunity.\n");
@@ -2095,6 +2121,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Token->getSplitAfterLastLine(TailOffset);
if (SplitAfterLastLine.first != StringRef::npos) {
LLVM_DEBUG(llvm::dbgs() << "Replacing whitespace after last line.\n");
+
+ // We add the last line's penalty here, since that line is going to be split
+ // now.
+ Penalty += Style.PenaltyExcessCharacter *
+ (ContentStartColumn + RemainingTokenColumns - ColumnLimit);
+
if (!DryRun)
Token->replaceWhitespaceAfterLastLine(TailOffset, SplitAfterLastLine,
Whitespaces);
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 9a2da69e89b1..2c4f8760540a 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -29,7 +29,6 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "llvm/ADT/STLExtras.h"
@@ -38,6 +37,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
#include <algorithm>
#include <memory>
@@ -414,6 +414,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
@@ -469,6 +470,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("Standard", Style.Standard);
+ IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
IO.mapOptional("UseTab", Style.UseTab);
}
@@ -714,6 +716,8 @@ FormatStyle getLLVMStyle() {
LLVMStyle.DisableFormat = false;
LLVMStyle.SortIncludes = true;
LLVMStyle.SortUsingDeclarations = true;
+ LLVMStyle.StatementMacros.push_back("Q_UNUSED");
+ LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
return LLVMStyle;
}
@@ -819,7 +823,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.JavaScriptQuotes = FormatStyle::JSQS_Single;
GoogleStyle.JavaScriptWrapImports = false;
} else if (Language == FormatStyle::LK_Proto) {
- GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.SpacesInContainerLiterals = false;
GoogleStyle.Cpp11BracedListStyle = false;
@@ -844,6 +848,20 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
ChromiumStyle.BreakAfterJavaFieldAnnotations = true;
ChromiumStyle.ContinuationIndentWidth = 8;
ChromiumStyle.IndentWidth = 4;
+ // See styleguide for import groups:
+ // https://chromium.googlesource.com/chromium/src/+/master/styleguide/java/java.md#Import-Order
+ ChromiumStyle.JavaImportGroups = {
+ "android",
+ "com",
+ "dalvik",
+ "junit",
+ "org",
+ "com.google.android.apps.chrome",
+ "org.chromium",
+ "java",
+ "javax",
+ };
+ ChromiumStyle.SortIncludes = true;
} else if (Language == FormatStyle::LK_JavaScript) {
ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
ChromiumStyle.AllowShortLoopsOnASingleLine = false;
@@ -1309,8 +1327,7 @@ private:
std::set<unsigned> DeletedLines;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
auto &Line = *AnnotatedLines[i];
- if (Line.startsWith(tok::kw_namespace) ||
- Line.startsWith(tok::kw_inline, tok::kw_namespace)) {
+ if (Line.startsWithNamespace()) {
checkEmptyNamespace(AnnotatedLines, i, i, DeletedLines);
}
}
@@ -1347,9 +1364,7 @@ private:
if (AnnotatedLines[CurrentLine]->startsWith(tok::r_brace))
break;
- if (AnnotatedLines[CurrentLine]->startsWith(tok::kw_namespace) ||
- AnnotatedLines[CurrentLine]->startsWith(tok::kw_inline,
- tok::kw_namespace)) {
+ if (AnnotatedLines[CurrentLine]->startsWithNamespace()) {
if (!checkEmptyNamespace(AnnotatedLines, CurrentLine, NewLine,
DeletedLines))
return false;
@@ -1489,7 +1504,8 @@ public:
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) override {
assert(Style.Language == FormatStyle::LK_Cpp);
- IsObjC = guessIsObjC(AnnotatedLines, Tokens.getKeywords());
+ IsObjC = guessIsObjC(Env.getSourceManager(), AnnotatedLines,
+ Tokens.getKeywords());
tooling::Replacements Result;
return {Result, 0};
}
@@ -1497,8 +1513,10 @@ public:
bool isObjC() { return IsObjC; }
private:
- static bool guessIsObjC(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- const AdditionalKeywords &Keywords) {
+ static bool
+ guessIsObjC(const SourceManager &SourceManager,
+ const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ const AdditionalKeywords &Keywords) {
// Keep this array sorted, since we are binary searching over it.
static constexpr llvm::StringLiteral FoundationIdentifiers[] = {
"CGFloat",
@@ -1589,9 +1607,15 @@ private:
TT_ObjCBlockLBrace, TT_ObjCBlockLParen,
TT_ObjCDecl, TT_ObjCForIn, TT_ObjCMethodExpr,
TT_ObjCMethodSpecifier, TT_ObjCProperty)) {
+ LLVM_DEBUG(llvm::dbgs()
+ << "Detected ObjC at location "
+ << FormatTok->Tok.getLocation().printToString(
+ SourceManager)
+ << " token: " << FormatTok->TokenText << " token type: "
+ << getTokenTypeName(FormatTok->Type) << "\n");
return true;
}
- if (guessIsObjC(Line->Children, Keywords))
+ if (guessIsObjC(SourceManager, Line->Children, Keywords))
return true;
}
}
@@ -1608,6 +1632,14 @@ struct IncludeDirective {
int Category;
};
+struct JavaImportDirective {
+ StringRef Identifier;
+ StringRef Text;
+ unsigned Offset;
+ std::vector<StringRef> AssociatedCommentLines;
+ bool IsStatic;
+};
+
} // end anonymous namespace
// Determines whether 'Ranges' intersects with ('Start', 'End').
@@ -1726,7 +1758,7 @@ static void sortCppIncludes(const FormatStyle &Style,
namespace {
-const char IncludeRegexPattern[] =
+const char CppIncludeRegexPattern[] =
R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
} // anonymous namespace
@@ -1738,7 +1770,7 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
unsigned *Cursor) {
unsigned Prev = 0;
unsigned SearchFrom = 0;
- llvm::Regex IncludeRegex(IncludeRegexPattern);
+ llvm::Regex IncludeRegex(CppIncludeRegexPattern);
SmallVector<StringRef, 4> Matches;
SmallVector<IncludeDirective, 16> IncludesInBlock;
@@ -1797,6 +1829,149 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
return Replaces;
}
+// Returns the group number to use as a first-order sort key on imports. Gives
+// UINT_MAX if the import does not match any of the given groups.
+static unsigned findJavaImportGroup(const FormatStyle &Style,
+ StringRef ImportIdentifier) {
+ unsigned LongestMatchIndex = UINT_MAX;
+ unsigned LongestMatchLength = 0;
+ for (unsigned I = 0; I < Style.JavaImportGroups.size(); I++) {
+ std::string GroupPrefix = Style.JavaImportGroups[I];
+ if (ImportIdentifier.startswith(GroupPrefix) &&
+ GroupPrefix.length() > LongestMatchLength) {
+ LongestMatchIndex = I;
+ LongestMatchLength = GroupPrefix.length();
+ }
+ }
+ return LongestMatchIndex;
+}
+
+// Sorts and deduplicates a block of imports given by 'Imports' based on
+// JavaImportGroups, then adds the necessary replacement to 'Replaces'.
+// Import declarations with the same text will be deduplicated. Between each
+// import group, a newline is inserted, and within each import group, a
+// lexicographic sort based on ASCII value is performed.
+static void sortJavaImports(const FormatStyle &Style,
+ const SmallVectorImpl<JavaImportDirective> &Imports,
+ ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ tooling::Replacements &Replaces) {
+ unsigned ImportsBeginOffset = Imports.front().Offset;
+ unsigned ImportsEndOffset =
+ Imports.back().Offset + Imports.back().Text.size();
+ unsigned ImportsBlockSize = ImportsEndOffset - ImportsBeginOffset;
+ if (!affectsRange(Ranges, ImportsBeginOffset, ImportsEndOffset))
+ return;
+ SmallVector<unsigned, 16> Indices;
+ SmallVector<unsigned, 16> JavaImportGroups;
+ for (unsigned i = 0, e = Imports.size(); i != e; ++i) {
+ Indices.push_back(i);
+ JavaImportGroups.push_back(
+ findJavaImportGroup(Style, Imports[i].Identifier));
+ }
+ llvm::sort(Indices.begin(), Indices.end(), [&](unsigned LHSI, unsigned RHSI) {
+ // Negating IsStatic to push static imports above non-static imports.
+ return std::make_tuple(!Imports[LHSI].IsStatic, JavaImportGroups[LHSI],
+ Imports[LHSI].Identifier) <
+ std::make_tuple(!Imports[RHSI].IsStatic, JavaImportGroups[RHSI],
+ Imports[RHSI].Identifier);
+ });
+
+ // Deduplicate imports.
+ Indices.erase(std::unique(Indices.begin(), Indices.end(),
+ [&](unsigned LHSI, unsigned RHSI) {
+ return Imports[LHSI].Text == Imports[RHSI].Text;
+ }),
+ Indices.end());
+
+ bool CurrentIsStatic = Imports[Indices.front()].IsStatic;
+ unsigned CurrentImportGroup = JavaImportGroups[Indices.front()];
+
+ std::string result;
+ for (unsigned Index : Indices) {
+ if (!result.empty()) {
+ result += "\n";
+ if (CurrentIsStatic != Imports[Index].IsStatic ||
+ CurrentImportGroup != JavaImportGroups[Index])
+ result += "\n";
+ }
+ for (StringRef CommentLine : Imports[Index].AssociatedCommentLines) {
+ result += CommentLine;
+ result += "\n";
+ }
+ result += Imports[Index].Text;
+ CurrentIsStatic = Imports[Index].IsStatic;
+ CurrentImportGroup = JavaImportGroups[Index];
+ }
+
+ auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
+ ImportsBlockSize, result));
+ // FIXME: better error handling. For now, just skip the replacement for the
+ // release version.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
+}
+
+namespace {
+
+const char JavaImportRegexPattern[] =
+ "^[\t ]*import[\t ]*(static[\t ]*)?([^\t ]*)[\t ]*;";
+
+} // anonymous namespace
+
+tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName,
+ tooling::Replacements &Replaces) {
+ unsigned Prev = 0;
+ unsigned SearchFrom = 0;
+ llvm::Regex ImportRegex(JavaImportRegexPattern);
+ SmallVector<StringRef, 4> Matches;
+ SmallVector<JavaImportDirective, 16> ImportsInBlock;
+ std::vector<StringRef> AssociatedCommentLines;
+
+ bool FormattingOff = false;
+
+ for (;;) {
+ auto Pos = Code.find('\n', SearchFrom);
+ StringRef Line =
+ Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
+
+ StringRef Trimmed = Line.trim();
+ if (Trimmed == "// clang-format off")
+ FormattingOff = true;
+ else if (Trimmed == "// clang-format on")
+ FormattingOff = false;
+
+ if (ImportRegex.match(Line, &Matches)) {
+ if (FormattingOff) {
+ // If at least one import line has formatting turned off, turn off
+ // formatting entirely.
+ return Replaces;
+ }
+ StringRef Static = Matches[1];
+ StringRef Identifier = Matches[2];
+ bool IsStatic = false;
+ if (Static.contains("static")) {
+ IsStatic = true;
+ }
+ ImportsInBlock.push_back({Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
+ AssociatedCommentLines.clear();
+ } else if (Trimmed.size() > 0 && !ImportsInBlock.empty()) {
+ // Associate comments within the imports with the nearest import below them.
+ AssociatedCommentLines.push_back(Line);
+ }
+ Prev = Pos + 1;
+ if (Pos == StringRef::npos || Pos + 1 == Code.size())
+ break;
+ SearchFrom = Pos + 1;
+ }
+ if (!ImportsInBlock.empty())
+ sortJavaImports(Style, ImportsInBlock, Ranges, FileName, Replaces);
+ return Replaces;
+}
+
bool isMpegTS(StringRef Code) {
// MPEG transport streams use the ".ts" file extension. clang-format should
// not attempt to format those. MPEG TS' frame format starts with 0x47 every
@@ -1819,6 +1994,8 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
return Replaces;
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript)
return sortJavaScriptImports(Style, Code, Ranges, FileName);
+ if (Style.Language == FormatStyle::LanguageKind::LK_Java)
+ return sortJavaImports(Style, Code, Ranges, FileName, Replaces);
sortCppIncludes(Style, Code, Ranges, FileName, Replaces, Cursor);
return Replaces;
}
@@ -1872,7 +2049,8 @@ namespace {
inline bool isHeaderInsertion(const tooling::Replacement &Replace) {
return Replace.getOffset() == UINT_MAX && Replace.getLength() == 0 &&
- llvm::Regex(IncludeRegexPattern).match(Replace.getReplacementText());
+ llvm::Regex(CppIncludeRegexPattern)
+ .match(Replace.getReplacementText());
}
inline bool isHeaderDeletion(const tooling::Replacement &Replace) {
@@ -1925,7 +2103,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
}
}
- llvm::Regex IncludeRegex = llvm::Regex(IncludeRegexPattern);
+ llvm::Regex IncludeRegex = llvm::Regex(CppIncludeRegexPattern);
llvm::SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
@@ -2095,8 +2273,7 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
bool AlternativeOperators = Style.isCpp();
LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
LangOpts.Bool = 1;
- LangOpts.ObjC1 = 1;
- LangOpts.ObjC2 = 1;
+ LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
LangOpts.DeclSpecKeyword = 1; // To get __declspec.
return LangOpts;
@@ -2157,9 +2334,10 @@ const char *DefaultFallbackStyle = "LLVM";
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
- StringRef Code, vfs::FileSystem *FS) {
+ StringRef Code,
+ llvm::vfs::FileSystem *FS) {
if (!FS) {
- FS = vfs::getRealFileSystem().get();
+ FS = llvm::vfs::getRealFileSystem().get();
}
FormatStyle Style = getLLVMStyle();
Style.Language = guessLanguage(FileName, Code);
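
`findJavaImportGroup` above selects the longest matching group prefix, so a more specific group such as "com.google.android.apps.chrome" wins over a plain "com" entry. A standalone sketch of that selection with a hypothetical group list:

```cpp
// Longest-prefix-match selection of a Java import group, mirroring
// findJavaImportGroup above. The Groups vector here is a hypothetical example.
#include <climits>
#include <cstddef>
#include <string>
#include <vector>

unsigned findImportGroup(const std::vector<std::string> &Groups,
                         const std::string &ImportIdentifier) {
  unsigned LongestMatchIndex = UINT_MAX;
  std::size_t LongestMatchLength = 0;
  for (unsigned I = 0; I < Groups.size(); ++I) {
    const std::string &Prefix = Groups[I];
    if (ImportIdentifier.compare(0, Prefix.size(), Prefix) == 0 &&
        Prefix.size() > LongestMatchLength) {
      LongestMatchIndex = I;
      LongestMatchLength = Prefix.size();
    }
  }
  return LongestMatchIndex; // UINT_MAX if no group matched.
}

// Usage: with Groups = {"com", "org", "com.google"},
// findImportGroup(Groups, "com.google.Foo") returns 2, not 0, because
// "com.google" is the longer matching prefix.
```
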
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 9094e7689e1d..10390c42911b 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -86,6 +86,7 @@ namespace format {
TYPE(RegexLiteral) \
TYPE(SelectorName) \
TYPE(StartOfName) \
+ TYPE(StatementMacro) \
TYPE(StructuredBindingLSquare) \
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
@@ -188,10 +189,6 @@ struct FormatToken {
bool ClosesTemplateDeclaration = false;
/// Number of parameters, if this is "(", "[" or "<".
- ///
- /// This is initialized to 1 as we don't need to distinguish functions with
- /// 0 parameters from functions with 1 parameter. Thus, we can simply count
- /// the number of commas.
unsigned ParameterCount = 0;
/// Number of parameters that are nested blocks,
@@ -268,7 +265,7 @@ struct FormatToken {
/// \c true if this token ends a binary expression.
bool EndsBinaryExpression = false;
- /// Is this is an operator (or "."/"->") in a sequence of operators
+ /// If this is an operator (or "."/"->") in a sequence of operators
/// with the same precedence, contains the 0-based operator index.
unsigned OperatorIndex = 0;
@@ -325,6 +322,14 @@ struct FormatToken {
}
template <typename T> bool isNot(T Kind) const { return !is(Kind); }
+ bool closesScopeAfterBlock() const {
+ if (BlockKind == BK_Block)
+ return true;
+ if (closesScope())
+ return Previous->closesScopeAfterBlock();
+ return false;
+ }
+
/// \c true if this token starts a sequence with the given tokens in order,
/// following the ``Next`` pointers, ignoring comments.
template <typename A, typename... Ts>
@@ -520,8 +525,8 @@ struct FormatToken {
const FormatToken *NamespaceTok = this;
if (is(tok::comment))
NamespaceTok = NamespaceTok->getNextNonComment();
- // Detect "(inline)? namespace" in the beginning of a line.
- if (NamespaceTok && NamespaceTok->is(tok::kw_inline))
+ // Detect "(inline|export)? namespace" in the beginning of a line.
+ if (NamespaceTok && NamespaceTok->isOneOf(tok::kw_inline, tok::kw_export))
NamespaceTok = NamespaceTok->getNextNonComment();
return NamespaceTok && NamespaceTok->is(tok::kw_namespace) ? NamespaceTok
: nullptr;
@@ -594,6 +599,8 @@ public:
/// Notifies the \c Role that a comma was found.
virtual void CommaFound(const FormatToken *Token) {}
+ virtual const FormatToken *lastComma() { return nullptr; }
+
protected:
const FormatStyle &Style;
};
@@ -616,6 +623,12 @@ public:
Commas.push_back(Token);
}
+ const FormatToken *lastComma() override {
+ if (Commas.empty())
+ return nullptr;
+ return Commas.back();
+ }
+
private:
/// A struct that holds information on how to format a given list with
/// a specific number of columns.
@@ -672,6 +685,7 @@ struct AdditionalKeywords {
kw_function = &IdentTable.get("function");
kw_get = &IdentTable.get("get");
kw_import = &IdentTable.get("import");
+ kw_infer = &IdentTable.get("infer");
kw_is = &IdentTable.get("is");
kw_let = &IdentTable.get("let");
kw_module = &IdentTable.get("module");
@@ -743,6 +757,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_function;
IdentifierInfo *kw_get;
IdentifierInfo *kw_import;
+ IdentifierInfo *kw_infer;
IdentifierInfo *kw_is;
IdentifierInfo *kw_let;
IdentifierInfo *kw_module;
diff --git a/lib/Format/FormatTokenLexer.cpp b/lib/Format/FormatTokenLexer.cpp
index c7f720a443d3..146f5d68b559 100644
--- a/lib/Format/FormatTokenLexer.cpp
+++ b/lib/Format/FormatTokenLexer.cpp
@@ -37,8 +37,9 @@ FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros)
- ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
- llvm::sort(ForEachMacros.begin(), ForEachMacros.end());
+ Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
+ for (const std::string &StatementMacro : Style.StatementMacros)
+ Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -657,12 +658,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
}
if (Style.isCpp()) {
+ auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
- std::find(ForEachMacros.begin(), ForEachMacros.end(),
- FormatTok->Tok.getIdentifierInfo()) != ForEachMacros.end()) {
- FormatTok->Type = TT_ForEachMacro;
+ it != Macros.end()) {
+ FormatTok->Type = it->second;
} else if (FormatTok->is(tok::identifier)) {
if (MacroBlockBeginRegex.match(Text)) {
FormatTok->Type = TT_MacroBlockBegin;
diff --git a/lib/Format/FormatTokenLexer.h b/lib/Format/FormatTokenLexer.h
index 3b79d27480e3..0cf357c85f3b 100644
--- a/lib/Format/FormatTokenLexer.h
+++ b/lib/Format/FormatTokenLexer.h
@@ -22,6 +22,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
+#include "llvm/ADT/MapVector.h"
#include <stack>
@@ -99,7 +100,8 @@ private:
// Index (in 'Tokens') of the last token that starts a new line.
unsigned FirstInLineIndex;
SmallVector<FormatToken *, 16> Tokens;
- SmallVector<IdentifierInfo *, 8> ForEachMacros;
+
+ llvm::SmallMapVector<IdentifierInfo *, TokenType, 8> Macros;
bool FormattingDisabled;
diff --git a/lib/Format/NamespaceEndCommentsFixer.cpp b/lib/Format/NamespaceEndCommentsFixer.cpp
index 995b3219a1f4..dd364866d1ce 100644
--- a/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -125,12 +125,7 @@ getNamespaceToken(const AnnotatedLine *Line,
if (StartLineIndex > 0)
NamespaceTok = AnnotatedLines[StartLineIndex - 1]->First;
}
- // Detect "(inline)? namespace" in the beginning of a line.
- if (NamespaceTok->is(tok::kw_inline))
- NamespaceTok = NamespaceTok->getNextNonComment();
- if (!NamespaceTok || NamespaceTok->isNot(tok::kw_namespace))
- return nullptr;
- return NamespaceTok;
+ return NamespaceTok->getNamespaceToken();
}
NamespaceEndCommentsFixer::NamespaceEndCommentsFixer(const Environment &Env,
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 3a19215e1803..24c2f998c388 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -366,7 +366,8 @@ private:
// specifier parameter, although this is technically valid:
// [[foo(:)]]
if (AttrTok->is(tok::colon) ||
- AttrTok->startsSequence(tok::identifier, tok::identifier))
+ AttrTok->startsSequence(tok::identifier, tok::identifier) ||
+ AttrTok->startsSequence(tok::r_paren, tok::identifier))
return false;
if (AttrTok->is(tok::ellipsis))
return true;
@@ -398,9 +399,11 @@ private:
bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
Contexts.back().InCpp11AttributeSpecifier;
+ bool InsideInlineASM = Line.startsWith(tok::kw_asm);
bool StartsObjCMethodExpr =
- !CppArrayTemplates && Style.isCpp() && !IsCpp11AttributeSpecifier &&
- Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
+ !InsideInlineASM && !CppArrayTemplates && Style.isCpp() &&
+ !IsCpp11AttributeSpecifier && Contexts.back().CanBeExpression &&
+ Left->isNot(TT_LambdaLSquare) &&
!CurrentToken->isOneOf(tok::l_brace, tok::r_square) &&
(!Parent ||
Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
@@ -1120,6 +1123,7 @@ private:
(Tok.Next->Next->TokenText == "module" ||
Tok.Next->Next->TokenText == "provide" ||
Tok.Next->Next->TokenText == "require" ||
+ Tok.Next->Next->TokenText == "requireType" ||
Tok.Next->Next->TokenText == "forwardDeclare") &&
Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
}
@@ -2517,7 +2521,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Right.MatchingParen->BlockKind != BK_Block))
return !Style.Cpp11BracedListStyle;
if (Left.is(TT_BlockComment))
- return !Left.TokenText.endswith("=*/");
+ // No whitespace in x(/*foo=*/1), except for JavaScript.
+ return Style.Language == FormatStyle::LK_JavaScript ||
+ !Left.TokenText.endswith("=*/");
if (Right.is(tok::l_paren)) {
if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
(Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
@@ -2553,8 +2559,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
Left.MatchingParen->Previous &&
- Left.MatchingParen->Previous->is(tok::period))
+ (Left.MatchingParen->Previous->is(tok::period) ||
+ Left.MatchingParen->Previous->is(tok::coloncolon)))
+ // Java call to generic function with explicit type:
// A.<B<C<...>>>DoSomething();
+ // A::<B<C<...>>>DoSomething(); // With a Java 8 method reference.
return false;
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
return false;
@@ -2774,6 +2783,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (!Style.SpaceBeforeAssignmentOperators &&
Right.getPrecedence() == prec::Assignment)
return false;
+ if (Style.Language == FormatStyle::LK_Java && Right.is(tok::coloncolon) &&
+ (Left.is(tok::identifier) || Left.is(tok::kw_this)))
+ return false;
if (Right.is(tok::coloncolon) && Left.is(tok::identifier))
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
@@ -2866,6 +2878,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
} else if (Style.Language == FormatStyle::LK_Cpp ||
Style.Language == FormatStyle::LK_ObjC ||
Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TableGen ||
Style.Language == FormatStyle::LK_TextProto) {
if (Left.isStringLiteral() && Right.isStringLiteral())
return true;
@@ -3041,6 +3054,30 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
+ // Deal with lambda arguments in C++ - we want consistent line breaks whether
+ // they happen to be at arg0, arg1 or argN. The selection is a bit nuanced
+ // as aggressive line breaks are placed when the lambda is not the last arg.
+ if ((Style.Language == FormatStyle::LK_Cpp ||
+ Style.Language == FormatStyle::LK_ObjC) &&
+ Left.is(tok::l_paren) && Left.BlockParameterCount > 0 &&
+ !Right.isOneOf(tok::l_paren, TT_LambdaLSquare)) {
+ // Multiple lambdas in the same function call force line breaks.
+ if (Left.BlockParameterCount > 1)
+ return true;
+
+ // A lambda followed by another arg forces a line break.
+ if (!Left.Role)
+ return false;
+ auto Comma = Left.Role->lastComma();
+ if (!Comma)
+ return false;
+ auto Next = Comma->getNextNonComment();
+ if (!Next)
+ return false;
+ if (!Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret))
+ return true;
+ }
+
return false;
}
@@ -3078,14 +3115,33 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// Don't wrap between ":" and "!" of a strict prop init ("field!: type;").
if (Left.is(tok::exclaim) && Right.is(tok::colon))
return false;
- if (Right.is(Keywords.kw_is))
- return false;
+ // Look for is type annotations like:
+ // function f(): a is B { ... }
+ // Do not break before is in these cases.
+ if (Right.is(Keywords.kw_is)) {
+ const FormatToken *Next = Right.getNextNonComment();
+ // If `is` is followed by a colon, it's likely that it's a dict key, so
+ // ignore it for this check.
+ // For example this is common in Polymer:
+ // Polymer({
+ // is: 'name',
+ // ...
+ // });
+ if (!Next || !Next->is(tok::colon))
+ return false;
+ }
if (Left.is(Keywords.kw_in))
return Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None;
if (Right.is(Keywords.kw_in))
return Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None;
if (Right.is(Keywords.kw_as))
return false; // must not break before as in 'x as type' casts
+ if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_infer)) {
+ // extends and infer can appear as keywords in conditional types:
+ // https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-8.html#conditional-types
+ // do not break before them, as the expressions are subject to ASI.
+ return false;
+ }
if (Left.is(Keywords.kw_as))
return true;
if (Left.is(TT_JsNonNullAssertion))
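
The lambda-argument rule near the end of `mustBreakBefore` distinguishes a few call shapes; the comments in the hunk state the intent. A compilable illustration of those shapes follows (the callee names are hypothetical, and the exact output still depends on the rest of the formatter):

```cpp
#include <functional>

// Hypothetical callees used only to illustrate the call shapes the new
// rule in mustBreakBefore distinguishes.
void run(const std::function<void()> &, const std::function<void()> &) {}
void post(const std::function<void()> &, int) {}
void defer(int, const std::function<void()> &) {}

void demo() {
  run([] {}, [] {});  // multiple lambdas in one call: line breaks are forced
  post([] {}, 0);     // lambda followed by another argument: a break is forced
  defer(0, [] {});    // the only lambda is the last argument: no forced break
}
```
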
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index a3124fcb3d65..e2f2c469d267 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -105,6 +105,13 @@ public:
return !Last->isOneOf(tok::semi, tok::comment);
}
+ /// \c true if this line starts a namespace definition.
+ bool startsWithNamespace() const {
+ return startsWith(tok::kw_namespace) ||
+ startsWith(tok::kw_inline, tok::kw_namespace) ||
+ startsWith(tok::kw_export, tok::kw_namespace);
+ }
+
FormatToken *First;
FormatToken *Last;
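
`startsWithNamespace` (together with the matching `getNamespaceToken` change in FormatToken.h earlier in this patch) now treats C++20 `export namespace` the same way as plain and inline namespaces. For reference, the three line shapes it recognizes, shown in a module interface unit so the export form is well-formed; the file name and contents are illustrative only:

```cpp
// demo.cppm -- C++20 module interface unit, illustrative only.
export module demo;

namespace plain {}              // startsWith(tok::kw_namespace)
inline namespace versioned {}   // startsWith(tok::kw_inline, tok::kw_namespace)
export namespace exported {     // startsWith(tok::kw_export, tok::kw_namespace)
int answer = 42;
}
```
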
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index 906dae40cbee..6b6a9aff461a 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -323,6 +323,10 @@ private:
kwId == clang::tok::objc_synchronized)
return 0;
}
+ // Don't merge block with left brace wrapped after case labels
+ if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
+ I[-1]->First->isOneOf(tok::kw_case, tok::kw_default))
+ return 0;
// Try to merge a block with left brace wrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
return !Style.BraceWrapping.AfterFunction ||
@@ -424,6 +428,8 @@ private:
if (Limit == 0 || I + 1 == E ||
I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
return 0;
+ if (I[0]->Last->is(tok::l_brace) || I[1]->First->is(tok::l_brace))
+ return 0;
unsigned NumStmts = 0;
unsigned Length = 0;
bool EndsWithComment = false;
@@ -483,6 +489,12 @@ private:
if (Line.First->isOneOf(tok::kw_else, tok::kw_case) ||
(Line.First->Next && Line.First->Next->is(tok::kw_else)))
return 0;
+ // default: in switch statement
+ if (Line.First->is(tok::kw_default)) {
+ const FormatToken *Tok = Line.First->getNextNonComment();
+ if (Tok && Tok->is(tok::colon))
+ return 0;
+ }
if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
tok::kw___try, tok::kw_catch, tok::kw___finally,
tok::kw_for, tok::r_brace, Keywords.kw___except)) {
@@ -529,7 +541,7 @@ private:
Tok->SpacesRequiredBefore = 0;
Tok->CanBreakBefore = true;
return 1;
- } else if (Limit != 0 && !Line.startsWith(tok::kw_namespace) &&
+ } else if (Limit != 0 && !Line.startsWithNamespace() &&
!startsExternCBlock(Line)) {
// We don't merge short records.
FormatToken *RecordTok = Line.First;
@@ -976,8 +988,7 @@ private:
Path.push_front(Best);
Best = Best->Previous;
}
- for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
- I != E; ++I) {
+ for (auto I = Path.begin(), E = Path.end(); I != E; ++I) {
unsigned Penalty = 0;
formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
@@ -986,8 +997,8 @@ private:
printLineState((*I)->Previous->State);
if ((*I)->NewLine) {
llvm::dbgs() << "Penalty for placing "
- << (*I)->Previous->State.NextToken->Tok.getName() << ": "
- << Penalty << "\n";
+ << (*I)->Previous->State.NextToken->Tok.getName()
+ << " on a new line: " << Penalty << "\n";
}
});
}
@@ -1154,7 +1165,7 @@ void UnwrappedLineFormatter::formatFirstToken(
// Remove empty lines after "{".
if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
PreviousLine->Last->is(tok::l_brace) &&
- PreviousLine->First->isNot(tok::kw_namespace) &&
+ !PreviousLine->startsWithNamespace() &&
!startsExternCBlock(*PreviousLine))
Newlines = 1;
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index e5afa1264abb..3cd3c8f9cdf6 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -350,7 +350,10 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
break;
case tok::kw_default: {
unsigned StoredPosition = Tokens->getPosition();
- FormatToken *Next = Tokens->getNextToken();
+ FormatToken *Next;
+ do {
+ Next = Tokens->getNextToken();
+ } while (Next && Next->is(tok::comment));
FormatTok = Tokens->setPosition(StoredPosition);
if (Next && Next->isNot(tok::colon)) {
// default not followed by ':' is not a case label; treat it like
@@ -477,6 +480,10 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
}
LBraceStack.pop_back();
break;
+ case tok::identifier:
+ if (!Tok->is(TT_StatementMacro))
+ break;
+ LLVM_FALLTHROUGH;
case tok::at:
case tok::semi:
case tok::kw_if:
@@ -989,13 +996,6 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_namespace:
parseNamespace();
return;
- case tok::kw_inline:
- nextToken();
- if (FormatTok->Tok.is(tok::kw_namespace)) {
- parseNamespace();
- return;
- }
- break;
case tok::kw_public:
case tok::kw_protected:
case tok::kw_private:
@@ -1063,6 +1063,16 @@ void UnwrappedLineParser::parseStructuralElement() {
parseJavaScriptEs6ImportExport();
return;
}
+ if (!Style.isCpp())
+ break;
+ // Handle C++ "(inline|export) namespace".
+ LLVM_FALLTHROUGH;
+ case tok::kw_inline:
+ nextToken();
+ if (FormatTok->Tok.is(tok::kw_namespace)) {
+ parseNamespace();
+ return;
+ }
break;
case tok::identifier:
if (FormatTok->is(TT_ForEachMacro)) {
@@ -1102,6 +1112,10 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
}
+ if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ parseStatementMacro();
+ return;
+ }
// In all other cases, parse the declaration.
break;
default:
@@ -1116,6 +1130,10 @@ void UnwrappedLineParser::parseStructuralElement() {
nextToken();
parseBracedList();
break;
+ } else if (Style.Language == FormatStyle::LK_Java &&
+ FormatTok->is(Keywords.kw_interface)) {
+ nextToken();
+ break;
}
switch (FormatTok->Tok.getObjCKeywordID()) {
case tok::objc_public:
@@ -1260,6 +1278,8 @@ void UnwrappedLineParser::parseStructuralElement() {
break;
case tok::kw_try:
// We arrive here when parsing function-try blocks.
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
parseTryCatch();
return;
case tok::identifier: {
@@ -1301,6 +1321,11 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
+ if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
+ parseStatementMacro();
+ return;
+ }
+
// See if the following token should start a new unwrapped line.
StringRef Text = FormatTok->TokenText;
nextToken();
@@ -2143,6 +2168,8 @@ void UnwrappedLineParser::parseObjCMethod() {
addUnwrappedLine();
return;
} else if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterFunction)
+ addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
return;
@@ -2320,6 +2347,16 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
}
}
+void UnwrappedLineParser::parseStatementMacro()
+{
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+}
+
LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
StringRef Prefix = "") {
llvm::dbgs() << Prefix << "Line(" << Line.Level
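Not part of the patch: a sketch of the formatting behavior parseStatementMacro() enables. When an identifier is classified as TT_StatementMacro (via the style's statement-macro list; Q_UNUSED is only an illustrative name here), the macro invocation ends its own unwrapped line even without a trailing semicolon, instead of being glued to the following statement:

void test(int a, int b) {
  Q_UNUSED(a)   // parseStatementMacro() consumes the parens and ends the line here
  Q_UNUSED(b);  // an optional trailing ';' is consumed as part of the same line
  return;
}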
diff --git a/lib/Format/UnwrappedLineParser.h b/lib/Format/UnwrappedLineParser.h
index 87254832c635..55d60dff9152 100644
--- a/lib/Format/UnwrappedLineParser.h
+++ b/lib/Format/UnwrappedLineParser.h
@@ -126,6 +126,7 @@ private:
void parseObjCInterfaceOrImplementation();
bool parseObjCProtocol();
void parseJavaScriptEs6ImportExport();
+ void parseStatementMacro();
bool tryToParseLambda();
bool tryToParseLambdaIntroducer();
void tryToParseJSFunction();
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 7070ce03c864..032b1333322d 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -90,7 +90,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
if (Changes.empty())
return Replaces;
- llvm::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
+ llvm::sort(Changes, Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
alignConsecutiveDeclarations();
alignConsecutiveAssignments();
@@ -255,8 +255,14 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
Changes[ScopeStack.back()].indentAndNestingLevel())
ScopeStack.pop_back();
+ // Compare the current token to the previous non-comment token to determine
+ // whether it is in a deeper scope.
+ unsigned PreviousNonComment = i - 1;
+ while (PreviousNonComment > Start &&
+ Changes[PreviousNonComment].Tok->is(tok::comment))
+ PreviousNonComment--;
if (i != Start && Changes[i].indentAndNestingLevel() >
- Changes[i - 1].indentAndNestingLevel())
+ Changes[PreviousNonComment].indentAndNestingLevel())
ScopeStack.push_back(i);
bool InsideNestedScope = ScopeStack.size() != 0;
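Not part of the patch: a plausible illustration of the case the PreviousNonComment walk above addresses. When aligning consecutive assignments, a comment line between the entries must not be taken as the "previous" token when deciding whether the next change opens a deeper scope, or the alignment run can be cut short:

int aaaa = 12;
// an interleaved comment; skipped when comparing indent and nesting levels
int b    = 23;
int ccc  = 23;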
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index 2a8bfef68eb9..28834a2de8a2 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -193,365 +193,3 @@ void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
std::unique_ptr<ASTConsumer> clang::CreateASTViewer() {
return llvm::make_unique<ASTViewer>();
}
-
-//===----------------------------------------------------------------------===//
-/// DeclContextPrinter - Decl and DeclContext Visualization
-
-namespace {
-
-class DeclContextPrinter : public ASTConsumer {
- raw_ostream& Out;
-public:
- DeclContextPrinter() : Out(llvm::errs()) {}
-
- void HandleTranslationUnit(ASTContext &C) override {
- PrintDeclContext(C.getTranslationUnitDecl(), 4);
- }
-
- void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
-};
-} // end anonymous namespace
-
-void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
- unsigned Indentation) {
- // Print DeclContext name.
- switch (DC->getDeclKind()) {
- case Decl::TranslationUnit:
- Out << "[translation unit] " << DC;
- break;
- case Decl::Namespace: {
- Out << "[namespace] ";
- const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
- Out << *ND;
- break;
- }
- case Decl::Enum: {
- const EnumDecl* ED = cast<EnumDecl>(DC);
- if (ED->isCompleteDefinition())
- Out << "[enum] ";
- else
- Out << "<enum> ";
- Out << *ED;
- break;
- }
- case Decl::Record: {
- const RecordDecl* RD = cast<RecordDecl>(DC);
- if (RD->isCompleteDefinition())
- Out << "[struct] ";
- else
- Out << "<struct> ";
- Out << *RD;
- break;
- }
- case Decl::CXXRecord: {
- const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
- if (RD->isCompleteDefinition())
- Out << "[class] ";
- else
- Out << "<class> ";
- Out << *RD << ' ' << DC;
- break;
- }
- case Decl::ObjCMethod:
- Out << "[objc method]";
- break;
- case Decl::ObjCInterface:
- Out << "[objc interface]";
- break;
- case Decl::ObjCCategory:
- Out << "[objc category]";
- break;
- case Decl::ObjCProtocol:
- Out << "[objc protocol]";
- break;
- case Decl::ObjCImplementation:
- Out << "[objc implementation]";
- break;
- case Decl::ObjCCategoryImpl:
- Out << "[objc categoryimpl]";
- break;
- case Decl::LinkageSpec:
- Out << "[linkage spec]";
- break;
- case Decl::Block:
- Out << "[block]";
- break;
- case Decl::Function: {
- const FunctionDecl* FD = cast<FunctionDecl>(DC);
- if (FD->doesThisDeclarationHaveABody())
- Out << "[function] ";
- else
- Out << "<function> ";
- Out << *FD;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (auto I : FD->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *I;
- }
- Out << ")";
- break;
- }
- case Decl::CXXMethod: {
- const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ method] ";
- else if (D->isImplicit())
- Out << "(c++ method) ";
- else
- Out << "<c++ method> ";
- Out << *D;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (ParmVarDecl *Parameter : D->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *Parameter;
- }
- Out << ")";
-
- // Check the semantic DeclContext.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
-
- break;
- }
- case Decl::CXXConstructor: {
- const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ ctor] ";
- else if (D->isImplicit())
- Out << "(c++ ctor) ";
- else
- Out << "<c++ ctor> ";
- Out << *D;
- // Print the parameters.
- Out << "(";
- bool PrintComma = false;
- for (ParmVarDecl *Parameter : D->parameters()) {
- if (PrintComma)
- Out << ", ";
- else
- PrintComma = true;
- Out << *Parameter;
- }
- Out << ")";
-
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
- case Decl::CXXDestructor: {
- const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ dtor] ";
- else if (D->isImplicit())
- Out << "(c++ dtor) ";
- else
- Out << "<c++ dtor> ";
- Out << *D;
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
- case Decl::CXXConversion: {
- const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
- if (D->isOutOfLine())
- Out << "[c++ conversion] ";
- else if (D->isImplicit())
- Out << "(c++ conversion) ";
- else
- Out << "<c++ conversion> ";
- Out << *D;
- // Check the semantic DC.
- const DeclContext* SemaDC = D->getDeclContext();
- const DeclContext* LexicalDC = D->getLexicalDeclContext();
- if (SemaDC != LexicalDC)
- Out << " [[" << SemaDC << "]]";
- break;
- }
-
- case Decl::ClassTemplateSpecialization: {
- const auto *CTSD = cast<ClassTemplateSpecializationDecl>(DC);
- if (CTSD->isCompleteDefinition())
- Out << "[class template specialization] ";
- else
- Out << "<class template specialization> ";
- Out << *CTSD;
- break;
- }
-
- case Decl::ClassTemplatePartialSpecialization: {
- const auto *CTPSD = cast<ClassTemplatePartialSpecializationDecl>(DC);
- if (CTPSD->isCompleteDefinition())
- Out << "[class template partial specialization] ";
- else
- Out << "<class template partial specialization> ";
- Out << *CTPSD;
- break;
- }
-
- default:
- llvm_unreachable("a decl that inherits DeclContext isn't handled");
- }
-
- Out << "\n";
-
- // Print decls in the DeclContext.
- for (auto *I : DC->decls()) {
- for (unsigned i = 0; i < Indentation; ++i)
- Out << " ";
-
- Decl::Kind DK = I->getKind();
- switch (DK) {
- case Decl::Namespace:
- case Decl::Enum:
- case Decl::Record:
- case Decl::CXXRecord:
- case Decl::ObjCMethod:
- case Decl::ObjCInterface:
- case Decl::ObjCCategory:
- case Decl::ObjCProtocol:
- case Decl::ObjCImplementation:
- case Decl::ObjCCategoryImpl:
- case Decl::LinkageSpec:
- case Decl::Block:
- case Decl::Function:
- case Decl::CXXMethod:
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- case Decl::CXXConversion:
- case Decl::ClassTemplateSpecialization:
- case Decl::ClassTemplatePartialSpecialization: {
- DeclContext* DC = cast<DeclContext>(I);
- PrintDeclContext(DC, Indentation+2);
- break;
- }
- case Decl::IndirectField: {
- IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(I);
- Out << "<IndirectField> " << *IFD << '\n';
- break;
- }
- case Decl::Label: {
- LabelDecl *LD = cast<LabelDecl>(I);
- Out << "<Label> " << *LD << '\n';
- break;
- }
- case Decl::Field: {
- FieldDecl *FD = cast<FieldDecl>(I);
- Out << "<field> " << *FD << '\n';
- break;
- }
- case Decl::Typedef:
- case Decl::TypeAlias: {
- TypedefNameDecl* TD = cast<TypedefNameDecl>(I);
- Out << "<typedef> " << *TD << '\n';
- break;
- }
- case Decl::EnumConstant: {
- EnumConstantDecl* ECD = cast<EnumConstantDecl>(I);
- Out << "<enum constant> " << *ECD << '\n';
- break;
- }
- case Decl::Var: {
- VarDecl* VD = cast<VarDecl>(I);
- Out << "<var> " << *VD << '\n';
- break;
- }
- case Decl::ImplicitParam: {
- ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(I);
- Out << "<implicit parameter> " << *IPD << '\n';
- break;
- }
- case Decl::ParmVar: {
- ParmVarDecl* PVD = cast<ParmVarDecl>(I);
- Out << "<parameter> " << *PVD << '\n';
- break;
- }
- case Decl::ObjCProperty: {
- ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(I);
- Out << "<objc property> " << *OPD << '\n';
- break;
- }
- case Decl::FunctionTemplate: {
- FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(I);
- Out << "<function template> " << *FTD << '\n';
- break;
- }
- case Decl::FileScopeAsm: {
- Out << "<file-scope asm>\n";
- break;
- }
- case Decl::UsingDirective: {
- Out << "<using directive>\n";
- break;
- }
- case Decl::NamespaceAlias: {
- NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(I);
- Out << "<namespace alias> " << *NAD << '\n';
- break;
- }
- case Decl::ClassTemplate: {
- ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(I);
- Out << "<class template> " << *CTD << '\n';
- break;
- }
- case Decl::OMPThreadPrivate: {
- Out << "<omp threadprivate> " << '"' << I << "\"\n";
- break;
- }
- case Decl::Friend: {
- Out << "<friend>";
- if (const NamedDecl *ND = cast<FriendDecl>(I)->getFriendDecl())
- Out << ' ' << *ND;
- Out << "\n";
- break;
- }
- case Decl::Using: {
- Out << "<using> " << *cast<UsingDecl>(I) << "\n";
- break;
- }
- case Decl::UsingShadow: {
- Out << "<using shadow> " << *cast<UsingShadowDecl>(I) << "\n";
- break;
- }
- case Decl::Empty: {
- Out << "<empty>\n";
- break;
- }
- case Decl::AccessSpec: {
- Out << "<access specifier>\n";
- break;
- }
- case Decl::VarTemplate: {
- Out << "<var template> " << *cast<VarTemplateDecl>(I) << "\n";
- break;
- }
- case Decl::StaticAssert: {
- Out << "<static assert>\n";
- break;
- }
- default:
- Out << "DeclKind: " << DK << '"' << I << "\"\n";
- llvm_unreachable("decl unhandled");
- }
- }
-}
-std::unique_ptr<ASTConsumer> clang::CreateDeclContextPrinter() {
- return llvm::make_unique<DeclContextPrinter>();
-}
diff --git a/lib/Frontend/ASTMerge.cpp b/lib/Frontend/ASTMerge.cpp
index 2434113ab0db..4f622da118c5 100644
--- a/lib/Frontend/ASTMerge.cpp
+++ b/lib/Frontend/ASTMerge.cpp
@@ -10,6 +10,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTImporter.h"
+#include "clang/AST/ASTImporterLookupTable.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
@@ -38,6 +39,8 @@ void ASTMergeAction::ExecuteAction() {
&CI.getASTContext());
IntrusiveRefCntPtr<DiagnosticIDs>
DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
+ ASTImporterLookupTable LookupTable(
+ *CI.getASTContext().getTranslationUnitDecl());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
IntrusiveRefCntPtr<DiagnosticsEngine>
Diags(new DiagnosticsEngine(DiagIDs, &CI.getDiagnosticOpts(),
@@ -51,11 +54,9 @@ void ASTMergeAction::ExecuteAction() {
if (!Unit)
continue;
- ASTImporter Importer(CI.getASTContext(),
- CI.getFileManager(),
- Unit->getASTContext(),
- Unit->getFileManager(),
- /*MinimalImport=*/false);
+ ASTImporter Importer(CI.getASTContext(), CI.getFileManager(),
+ Unit->getASTContext(), Unit->getFileManager(),
+ /*MinimalImport=*/false, &LookupTable);
TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
for (auto *D : TU->decls()) {
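Not part of the patch: a minimal sketch of the pattern the ASTMergeAction change uses, one ASTImporterLookupTable built over the destination translation unit and shared by every importer. ToContext, ToFileMgr and FromUnit are placeholder names, not identifiers from the patch:

// Build the lookup table once, over the TU that everything is imported into.
ASTImporterLookupTable LookupTable(*ToContext.getTranslationUnitDecl());

// Each source AST gets its own importer, but they all share the table.
ASTImporter Importer(ToContext, ToFileMgr, FromUnit->getASTContext(),
                     FromUnit->getFileManager(),
                     /*MinimalImport=*/false, &LookupTable);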
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index e4c313fed30f..c7b2551cb8d7 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -37,7 +37,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendAction.h"
@@ -45,7 +44,6 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/MultiplexConsumer.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
@@ -64,6 +62,7 @@
#include "clang/Serialization/ASTWriter.h"
#include "clang/Serialization/ContinuousRangeMap.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
@@ -88,6 +87,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <atomic>
@@ -155,9 +155,8 @@ static bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
/// and file-to-buffer remappings inside \p Invocation.
static std::unique_ptr<llvm::MemoryBuffer>
getBufferForFileHandlingRemapping(const CompilerInvocation &Invocation,
- vfs::FileSystem *VFS,
- StringRef FilePath,
- bool isVolatile) {
+ llvm::vfs::FileSystem *VFS,
+ StringRef FilePath, bool isVolatile) {
const auto &PreprocessorOpts = Invocation.getPreprocessorOpts();
// Try to determine if the main file has been remapped, either from the
@@ -283,7 +282,7 @@ void ASTUnit::enableSourceFileDiagnostics() {
/// Determine the set of code-completion contexts in which this
/// declaration should be shown.
-static unsigned getDeclShowContexts(const NamedDecl *ND,
+static uint64_t getDeclShowContexts(const NamedDecl *ND,
const LangOptions &LangOpts,
bool &IsNestedNameSpecifier) {
IsNestedNameSpecifier = false;
@@ -437,14 +436,15 @@ void ASTUnit::CacheCodeCompletionResults() {
| (1LL << CodeCompletionContext::CCC_UnionTag)
| (1LL << CodeCompletionContext::CCC_ClassOrStructTag)
| (1LL << CodeCompletionContext::CCC_Type)
- | (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName)
+ | (1LL << CodeCompletionContext::CCC_Symbol)
+ | (1LL << CodeCompletionContext::CCC_SymbolOrNewName)
| (1LL << CodeCompletionContext::CCC_ParenthesizedExpression);
if (isa<NamespaceDecl>(R.Declaration) ||
isa<NamespaceAliasDecl>(R.Declaration))
NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace);
- if (unsigned RemainingContexts
+ if (uint64_t RemainingContexts
= NNSContexts & ~CachedResult.ShowInContexts) {
// If there any contexts where this completion can be a
// nested-name-specifier but isn't already an option, create a
@@ -752,7 +752,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
- IntrusiveRefCntPtr<vfs::FileSystem> VFS = vfs::getRealFileSystem();
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
+ llvm::vfs::getRealFileSystem();
AST->FileMgr = new FileManager(FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
@@ -1074,7 +1075,7 @@ static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
/// contain any translation-unit information, false otherwise.
bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1082,7 +1083,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (OverrideMainBuffer) {
assert(Preamble &&
"No preamble was built, but OverrideMainBuffer is not null");
- IntrusiveRefCntPtr<vfs::FileSystem> OldVFS = VFS;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> OldVFS = VFS;
Preamble->AddImplicitPreamble(*CCInvocation, VFS, OverrideMainBuffer.get());
if (OldVFS != VFS && FileMgr) {
assert(OldVFS == FileMgr->getVirtualFileSystem() &&
@@ -1279,7 +1280,7 @@ std::unique_ptr<llvm::MemoryBuffer>
ASTUnit::getMainBufferWithPrecompiledPreamble(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
CompilerInvocation &PreambleInvocationIn,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS, bool AllowRebuild,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS, bool AllowRebuild,
unsigned MaxLines) {
auto MainFilePath =
PreambleInvocationIn.getFrontendOpts().Inputs[0].getFile();
@@ -1363,7 +1364,6 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
} else {
switch (static_cast<BuildPreambleError>(NewPreamble.getError().value())) {
case BuildPreambleError::CouldntCreateTempFile:
- case BuildPreambleError::PreambleIsEmpty:
// Try again next time.
PreambleRebuildCounter = 1;
return nullptr;
@@ -1469,7 +1469,7 @@ ASTUnit::create(std::shared_ptr<CompilerInvocation> CI,
bool CaptureDiagnostics, bool UserFilesAreVolatile) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(false));
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(*CI, *Diags);
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
@@ -1631,7 +1631,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
bool ASTUnit::LoadFromCompilerInvocation(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
unsigned PrecompilePreambleAfterNParses,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1710,7 +1710,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies,
bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization,
llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
@@ -1755,7 +1755,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
if (!VFS)
- VFS = vfs::getRealFileSystem();
+ VFS = llvm::vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
AST->PCMCache = new MemoryBufferCache;
@@ -1795,7 +1795,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ArrayRef<RemappedFile> RemappedFiles,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Invocation)
return true;
@@ -1912,8 +1912,10 @@ namespace {
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
- unsigned NumCandidates) override {
- Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
+ unsigned NumCandidates,
+ SourceLocation OpenParLoc) override {
+ Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates,
+ OpenParLoc);
}
CodeCompletionAllocator &getAllocator() override {
@@ -1950,8 +1952,8 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ObjCPropertyAccess:
case CodeCompletionContext::CCC_Namespace:
case CodeCompletionContext::CCC_Type:
- case CodeCompletionContext::CCC_Name:
- case CodeCompletionContext::CCC_PotentiallyQualifiedName:
+ case CodeCompletionContext::CCC_Symbol:
+ case CodeCompletionContext::CCC_SymbolOrNewName:
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_ObjCInterfaceName:
break;
@@ -1975,6 +1977,8 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ObjCInstanceMessage:
case CodeCompletionContext::CCC_ObjCClassMessage:
case CodeCompletionContext::CCC_ObjCCategoryName:
+ case CodeCompletionContext::CCC_IncludedFile:
+ case CodeCompletionContext::CCC_NewName:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -2644,9 +2648,9 @@ InputKind ASTUnit::getInputKind() const {
else if (LangOpts.RenderScript)
Lang = InputKind::RenderScript;
else if (LangOpts.CPlusPlus)
- Lang = LangOpts.ObjC1 ? InputKind::ObjCXX : InputKind::CXX;
+ Lang = LangOpts.ObjC ? InputKind::ObjCXX : InputKind::CXX;
else
- Lang = LangOpts.ObjC1 ? InputKind::ObjC : InputKind::C;
+ Lang = LangOpts.ObjC ? InputKind::ObjC : InputKind::C;
InputKind::Format Fmt = InputKind::Source;
if (LangOpts.getCompilingModule() == LangOptions::CMK_ModuleMap)
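Not part of the patch: the virtual file system layer now lives in LLVM, so callers include llvm/Support/VirtualFileSystem.h and spell the types through llvm::vfs. A minimal sketch of constructing a FileManager against the real file system under the new names:

#include "clang/Basic/FileManager.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/VirtualFileSystem.h"

llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
    llvm::vfs::getRealFileSystem();
clang::FileSystemOptions FSOpts;
clang::FileManager FileMgr(FSOpts, FS);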
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 6161b46a9dc5..3bd159537b6a 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -16,10 +16,8 @@ add_clang_library(clangFrontend
ASTConsumers.cpp
ASTMerge.cpp
ASTUnit.cpp
- CacheTokens.cpp
ChainedDiagnosticConsumer.cpp
ChainedIncludesSource.cpp
- CodeGenOptions.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
CreateInvocationFromCommandLine.cpp
@@ -38,7 +36,6 @@ add_clang_library(clangFrontend
LogDiagnosticPrinter.cpp
ModuleDependencyCollector.cpp
MultiplexConsumer.cpp
- PCHContainerOperations.cpp
PrecompiledPreamble.cpp
PrintPreprocessedOutput.cpp
SerializedDiagnosticPrinter.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
deleted file mode 100644
index c4504a14456d..000000000000
--- a/lib/Frontend/CacheTokens.cpp
+++ /dev/null
@@ -1,700 +0,0 @@
-//===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a possible implementation of PTH support for Clang that is
-// based on caching lexed tokens and identifiers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/Utils.h"
-#include "clang/Lex/Lexer.h"
-#include "clang/Lex/PTHManager.h"
-#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/DJB.h"
-#include "llvm/Support/EndianStream.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
-#include "llvm/Support/Path.h"
-
-// FIXME: put this somewhere else?
-#ifndef S_ISDIR
-#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
-#endif
-
-using namespace clang;
-
-//===----------------------------------------------------------------------===//
-// PTH-specific stuff.
-//===----------------------------------------------------------------------===//
-
-typedef uint32_t Offset;
-
-namespace {
-class PTHEntry {
- Offset TokenData, PPCondData;
-
-public:
- PTHEntry() {}
-
- PTHEntry(Offset td, Offset ppcd)
- : TokenData(td), PPCondData(ppcd) {}
-
- Offset getTokenOffset() const { return TokenData; }
- Offset getPPCondTableOffset() const { return PPCondData; }
-};
-
-
-class PTHEntryKeyVariant {
- union {
- const FileEntry *FE;
- // FIXME: Use "StringRef Path;" when MSVC 2013 is dropped.
- const char *PathPtr;
- };
- size_t PathSize;
- enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
- FileData *Data;
-
-public:
- PTHEntryKeyVariant(const FileEntry *fe) : FE(fe), Kind(IsFE), Data(nullptr) {}
-
- PTHEntryKeyVariant(FileData *Data, StringRef Path)
- : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsDE),
- Data(new FileData(*Data)) {}
-
- explicit PTHEntryKeyVariant(StringRef Path)
- : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsNoExist),
- Data(nullptr) {}
-
- bool isFile() const { return Kind == IsFE; }
-
- StringRef getString() const {
- return Kind == IsFE ? FE->getName() : StringRef(PathPtr, PathSize);
- }
-
- unsigned getKind() const { return (unsigned) Kind; }
-
- void EmitData(raw_ostream& Out) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
- switch (Kind) {
- case IsFE: {
- // Emit stat information.
- llvm::sys::fs::UniqueID UID = FE->getUniqueID();
- LE.write<uint64_t>(UID.getFile());
- LE.write<uint64_t>(UID.getDevice());
- LE.write<uint64_t>(FE->getModificationTime());
- LE.write<uint64_t>(FE->getSize());
- } break;
- case IsDE:
- // Emit stat information.
- LE.write<uint64_t>(Data->UniqueID.getFile());
- LE.write<uint64_t>(Data->UniqueID.getDevice());
- LE.write<uint64_t>(Data->ModTime);
- LE.write<uint64_t>(Data->Size);
- delete Data;
- break;
- default:
- break;
- }
- }
-
- unsigned getRepresentationLength() const {
- return Kind == IsNoExist ? 0 : 4 * 8;
- }
-};
-
-class FileEntryPTHEntryInfo {
-public:
- typedef PTHEntryKeyVariant key_type;
- typedef key_type key_type_ref;
-
- typedef PTHEntry data_type;
- typedef const PTHEntry& data_type_ref;
-
- typedef unsigned hash_value_type;
- typedef unsigned offset_type;
-
- static hash_value_type ComputeHash(PTHEntryKeyVariant V) {
- return llvm::djbHash(V.getString());
- }
-
- static std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
- const PTHEntry& E) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
-
- unsigned n = V.getString().size() + 1 + 1;
- LE.write<uint16_t>(n);
-
- unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
- LE.write<uint8_t>(m);
-
- return std::make_pair(n, m);
- }
-
- static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
- using namespace llvm::support;
- // Emit the entry kind.
- Out << char(V.getKind());
- // Emit the string.
- Out.write(V.getString().data(), n - 1);
- }
-
- static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
- const PTHEntry& E, unsigned) {
- using namespace llvm::support;
- endian::Writer LE(Out, little);
-
- // For file entries emit the offsets into the PTH file for token data
- // and the preprocessor blocks table.
- if (V.isFile()) {
- LE.write<uint32_t>(E.getTokenOffset());
- LE.write<uint32_t>(E.getPPCondTableOffset());
- }
-
- // Emit any other data associated with the key (i.e., stat information).
- V.EmitData(Out);
- }
-};
-
-class OffsetOpt {
- bool valid;
- Offset off;
-public:
- OffsetOpt() : valid(false) {}
- bool hasOffset() const { return valid; }
- Offset getOffset() const { assert(valid); return off; }
- void setOffset(Offset o) { off = o; valid = true; }
-};
-} // end anonymous namespace
-
-typedef llvm::OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
-
-namespace {
-class PTHWriter {
- typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
- typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
-
- raw_pwrite_stream &Out;
- Preprocessor& PP;
- IDMap IM;
- std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
- PTHMap PM;
- CachedStrsTy CachedStrs;
- uint32_t idcount;
- Offset CurStrOffset;
-
- //// Get the persistent id for the given IdentifierInfo*.
- uint32_t ResolveID(const IdentifierInfo* II);
-
- /// Emit a token to the PTH file.
- void EmitToken(const Token& T);
-
- void Emit8(uint32_t V) {
- Out << char(V);
- }
-
- void Emit16(uint32_t V) {
- using namespace llvm::support;
- endian::write<uint16_t>(Out, V, little);
- }
-
- void Emit32(uint32_t V) {
- using namespace llvm::support;
- endian::write<uint32_t>(Out, V, little);
- }
-
- void EmitBuf(const char *Ptr, unsigned NumBytes) {
- Out.write(Ptr, NumBytes);
- }
-
- void EmitString(StringRef V) {
- using namespace llvm::support;
- endian::write<uint16_t>(Out, V.size(), little);
- EmitBuf(V.data(), V.size());
- }
-
- /// EmitIdentifierTable - Emits two tables to the PTH file. The first is
- /// a hashtable mapping from identifier strings to persistent IDs.
- /// The second is a straight table mapping from persistent IDs to string data
- /// (the keys of the first table).
- std::pair<Offset, Offset> EmitIdentifierTable();
-
- /// EmitFileTable - Emit a table mapping from file name strings to PTH
- /// token data.
- Offset EmitFileTable() { return PM.Emit(Out); }
-
- PTHEntry LexTokens(Lexer& L);
- Offset EmitCachedSpellings();
-
-public:
- PTHWriter(raw_pwrite_stream &out, Preprocessor &pp)
- : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
-
- PTHMap &getPM() { return PM; }
- void GeneratePTH(StringRef MainFile);
-};
-} // end anonymous namespace
-
-uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
- // Null IdentifierInfo's map to the persistent ID 0.
- if (!II)
- return 0;
-
- IDMap::iterator I = IM.find(II);
- if (I != IM.end())
- return I->second; // We've already added 1.
-
- IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
- return idcount;
-}
-
-void PTHWriter::EmitToken(const Token& T) {
- // Emit the token kind, flags, and length.
- Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
- (((uint32_t) T.getLength()) << 16));
-
- if (!T.isLiteral()) {
- Emit32(ResolveID(T.getIdentifierInfo()));
- } else {
- // We cache *un-cleaned* spellings. This gives us 100% fidelity with the
- // source code.
- StringRef s(T.getLiteralData(), T.getLength());
-
- // Get the string entry.
- auto &E = *CachedStrs.insert(std::make_pair(s, OffsetOpt())).first;
-
- // If this is a new string entry, bump the PTH offset.
- if (!E.second.hasOffset()) {
- E.second.setOffset(CurStrOffset);
- StrEntries.push_back(&E);
- CurStrOffset += s.size() + 1;
- }
-
- // Emit the relative offset into the PTH file for the spelling string.
- Emit32(E.second.getOffset());
- }
-
- // Emit the offset into the original source file of this token so that we
- // can reconstruct its SourceLocation.
- Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
-}
-
-PTHEntry PTHWriter::LexTokens(Lexer& L) {
- // Pad 0's so that we emit tokens to a 4-byte alignment.
- // This speed up reading them back in.
- using namespace llvm::support;
- endian::Writer LE(Out, little);
- uint32_t TokenOff = Out.tell();
- for (uint64_t N = llvm::OffsetToAlignment(TokenOff, 4); N; --N, ++TokenOff)
- LE.write<uint8_t>(0);
-
- // Keep track of matching '#if' ... '#endif'.
- typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
- PPCondTable PPCond;
- std::vector<unsigned> PPStartCond;
- bool ParsingPreprocessorDirective = false;
- Token Tok;
-
- do {
- L.LexFromRawLexer(Tok);
- NextToken:
-
- if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
- ParsingPreprocessorDirective) {
- // Insert an eod token into the token cache. It has the same
- // position as the next token that is not on the same line as the
- // preprocessor directive. Observe that we continue processing
- // 'Tok' when we exit this branch.
- Token Tmp = Tok;
- Tmp.setKind(tok::eod);
- Tmp.clearFlag(Token::StartOfLine);
- Tmp.setIdentifierInfo(nullptr);
- EmitToken(Tmp);
- ParsingPreprocessorDirective = false;
- }
-
- if (Tok.is(tok::raw_identifier)) {
- PP.LookUpIdentifierInfo(Tok);
- EmitToken(Tok);
- continue;
- }
-
- if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
- // Special processing for #include. Store the '#' token and lex
- // the next token.
- assert(!ParsingPreprocessorDirective);
- Offset HashOff = (Offset) Out.tell();
-
- // Get the next token.
- Token NextTok;
- L.LexFromRawLexer(NextTok);
-
- // If we see the start of line, then we had a null directive "#". In
- // this case, discard both tokens.
- if (NextTok.isAtStartOfLine())
- goto NextToken;
-
- // The token is the start of a directive. Emit it.
- EmitToken(Tok);
- Tok = NextTok;
-
- // Did we see 'include'/'import'/'include_next'?
- if (Tok.isNot(tok::raw_identifier)) {
- EmitToken(Tok);
- continue;
- }
-
- IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
- tok::PPKeywordKind K = II->getPPKeywordID();
-
- ParsingPreprocessorDirective = true;
-
- switch (K) {
- case tok::pp_not_keyword:
- // Invalid directives "#foo" can occur in #if 0 blocks etc, just pass
- // them through.
- default:
- break;
-
- case tok::pp_include:
- case tok::pp_import:
- case tok::pp_include_next: {
- // Save the 'include' token.
- EmitToken(Tok);
- // Lex the next token as an include string.
- L.setParsingPreprocessorDirective(true);
- L.LexIncludeFilename(Tok);
- L.setParsingPreprocessorDirective(false);
- assert(!Tok.isAtStartOfLine());
- if (Tok.is(tok::raw_identifier))
- PP.LookUpIdentifierInfo(Tok);
-
- break;
- }
- case tok::pp_if:
- case tok::pp_ifdef:
- case tok::pp_ifndef: {
- // Add an entry for '#if' and friends. We initially set the target
- // index to 0. This will get backpatched when we hit #endif.
- PPStartCond.push_back(PPCond.size());
- PPCond.push_back(std::make_pair(HashOff, 0U));
- break;
- }
- case tok::pp_endif: {
- // Add an entry for '#endif'. We set the target table index to itself.
- // This will later be set to zero when emitting to the PTH file. We
- // use 0 for uninitialized indices because that is easier to debug.
- unsigned index = PPCond.size();
- // Backpatch the opening '#if' entry.
- assert(!PPStartCond.empty());
- assert(PPCond.size() > PPStartCond.back());
- assert(PPCond[PPStartCond.back()].second == 0);
- PPCond[PPStartCond.back()].second = index;
- PPStartCond.pop_back();
- // Add the new entry to PPCond.
- PPCond.push_back(std::make_pair(HashOff, index));
- EmitToken(Tok);
-
- // Some files have gibberish on the same line as '#endif'.
- // Discard these tokens.
- do
- L.LexFromRawLexer(Tok);
- while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
- // We have the next token in hand.
- // Don't immediately lex the next one.
- goto NextToken;
- }
- case tok::pp_elif:
- case tok::pp_else: {
- // Add an entry for #elif or #else.
- // This serves as both a closing and opening of a conditional block.
- // This means that its entry will get backpatched later.
- unsigned index = PPCond.size();
- // Backpatch the previous '#if' entry.
- assert(!PPStartCond.empty());
- assert(PPCond.size() > PPStartCond.back());
- assert(PPCond[PPStartCond.back()].second == 0);
- PPCond[PPStartCond.back()].second = index;
- PPStartCond.pop_back();
- // Now add '#elif' as a new block opening.
- PPCond.push_back(std::make_pair(HashOff, 0U));
- PPStartCond.push_back(index);
- break;
- }
- }
- }
-
- EmitToken(Tok);
- }
- while (Tok.isNot(tok::eof));
-
- assert(PPStartCond.empty() && "Error: imblanced preprocessor conditionals.");
-
- // Next write out PPCond.
- Offset PPCondOff = (Offset) Out.tell();
-
- // Write out the size of PPCond so that clients can identifer empty tables.
- Emit32(PPCond.size());
-
- for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
- Emit32(PPCond[i].first - TokenOff);
- uint32_t x = PPCond[i].second;
- assert(x != 0 && "PPCond entry not backpatched.");
- // Emit zero for #endifs. This allows us to do checking when
- // we read the PTH file back in.
- Emit32(x == i ? 0 : x);
- }
-
- return PTHEntry(TokenOff, PPCondOff);
-}
-
-Offset PTHWriter::EmitCachedSpellings() {
- // Write each cached strings to the PTH file.
- Offset SpellingsOff = Out.tell();
-
- for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
- I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
- EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
-
- return SpellingsOff;
-}
-
-static uint32_t swap32le(uint32_t X) {
- return llvm::support::endian::byte_swap<uint32_t, llvm::support::little>(X);
-}
-
-static void pwrite32le(raw_pwrite_stream &OS, uint32_t Val, uint64_t &Off) {
- uint32_t LEVal = swap32le(Val);
- OS.pwrite(reinterpret_cast<const char *>(&LEVal), 4, Off);
- Off += 4;
-}
-
-void PTHWriter::GeneratePTH(StringRef MainFile) {
- // Generate the prologue.
- Out << "cfe-pth" << '\0';
- Emit32(PTHManager::Version);
-
- // Leave 4 words for the prologue.
- Offset PrologueOffset = Out.tell();
- for (unsigned i = 0; i < 4; ++i)
- Emit32(0);
-
- // Write the name of the MainFile.
- if (!MainFile.empty()) {
- EmitString(MainFile);
- } else {
- // String with 0 bytes.
- Emit16(0);
- }
- Emit8(0);
-
- // Iterate over all the files in SourceManager. Create a lexer
- // for each file and cache the tokens.
- SourceManager &SM = PP.getSourceManager();
- const LangOptions &LOpts = PP.getLangOpts();
-
- for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
- E = SM.fileinfo_end(); I != E; ++I) {
- const SrcMgr::ContentCache &C = *I->second;
- const FileEntry *FE = C.OrigEntry;
-
- // FIXME: Handle files with non-absolute paths.
- if (llvm::sys::path::is_relative(FE->getName()))
- continue;
-
- const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
- if (!B) continue;
-
- FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
- const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
- Lexer L(FID, FromFile, SM, LOpts);
- PM.insert(FE, LexTokens(L));
- }
-
- // Write out the identifier table.
- const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
-
- // Write out the cached strings table.
- Offset SpellingOff = EmitCachedSpellings();
-
- // Write out the file table.
- Offset FileTableOff = EmitFileTable();
-
- // Finally, write the prologue.
- uint64_t Off = PrologueOffset;
- pwrite32le(Out, IdTableOff.first, Off);
- pwrite32le(Out, IdTableOff.second, Off);
- pwrite32le(Out, FileTableOff, Off);
- pwrite32le(Out, SpellingOff, Off);
-}
-
-namespace {
-/// StatListener - A simple "interpose" object used to monitor stat calls
-/// invoked by FileManager while processing the original sources used
-/// as input to PTH generation. StatListener populates the PTHWriter's
-/// file map with stat information for directories as well as negative stats.
-/// Stat information for files are populated elsewhere.
-class StatListener : public FileSystemStatCache {
- PTHMap &PM;
-public:
- StatListener(PTHMap &pm) : PM(pm) {}
- ~StatListener() override {}
-
- LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- vfs::FileSystem &FS) override {
- LookupResult Result = statChained(Path, Data, isFile, F, FS);
-
- if (Result == CacheMissing) // Failed 'stat'.
- PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
- else if (Data.IsDirectory) {
- // Only cache directories with absolute paths.
- if (llvm::sys::path::is_relative(Path))
- return Result;
-
- PM.insert(PTHEntryKeyVariant(&Data, Path), PTHEntry());
- }
-
- return Result;
- }
-};
-} // end anonymous namespace
-
-void clang::CacheTokens(Preprocessor &PP, raw_pwrite_stream *OS) {
- // Get the name of the main file.
- const SourceManager &SrcMgr = PP.getSourceManager();
- const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
- SmallString<128> MainFilePath(MainFile->getName());
-
- llvm::sys::fs::make_absolute(MainFilePath);
-
- // Create the PTHWriter.
- PTHWriter PW(*OS, PP);
-
- // Install the 'stat' system call listener in the FileManager.
- auto StatCacheOwner = llvm::make_unique<StatListener>(PW.getPM());
- StatListener *StatCache = StatCacheOwner.get();
- PP.getFileManager().addStatCache(std::move(StatCacheOwner),
- /*AtBeginning=*/true);
-
- // Lex through the entire file. This will populate SourceManager with
- // all of the header information.
- Token Tok;
- PP.EnterMainSourceFile();
- do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
-
- // Generate the PTH file.
- PP.getFileManager().removeStatCache(StatCache);
- PW.GeneratePTH(MainFilePath.str());
-}
-
-//===----------------------------------------------------------------------===//
-
-namespace {
-class PTHIdKey {
-public:
- const IdentifierInfo* II;
- uint32_t FileOffset;
-};
-
-class PTHIdentifierTableTrait {
-public:
- typedef PTHIdKey* key_type;
- typedef key_type key_type_ref;
-
- typedef uint32_t data_type;
- typedef data_type data_type_ref;
-
- typedef unsigned hash_value_type;
- typedef unsigned offset_type;
-
- static hash_value_type ComputeHash(PTHIdKey* key) {
- return llvm::djbHash(key->II->getName());
- }
-
- static std::pair<unsigned,unsigned>
- EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
- using namespace llvm::support;
- unsigned n = key->II->getLength() + 1;
- endian::write<uint16_t>(Out, n, little);
- return std::make_pair(n, sizeof(uint32_t));
- }
-
- static void EmitKey(raw_ostream& Out, PTHIdKey* key, unsigned n) {
- // Record the location of the key data. This is used when generating
- // the mapping from persistent IDs to strings.
- key->FileOffset = Out.tell();
- Out.write(key->II->getNameStart(), n);
- }
-
- static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
- unsigned) {
- using namespace llvm::support;
- endian::write<uint32_t>(Out, pID, little);
- }
-};
-} // end anonymous namespace
-
-/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
-/// a hashtable mapping from identifier strings to persistent IDs. The second
-/// is a straight table mapping from persistent IDs to string data (the
-/// keys of the first table).
-///
-std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
- // Build two maps:
- // (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
- // (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
-
- // Note that we use 'calloc', so all the bytes are 0.
- PTHIdKey *IIDMap = static_cast<PTHIdKey*>(
- llvm::safe_calloc(idcount, sizeof(PTHIdKey)));
-
- // Create the hashtable.
- llvm::OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
-
- // Generate mapping from persistent IDs -> IdentifierInfo*.
- for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
- // Decrement by 1 because we are using a vector for the lookup and
- // 0 is reserved for NULL.
- assert(I->second > 0);
- assert(I->second-1 < idcount);
- unsigned idx = I->second-1;
-
- // Store the mapping from persistent ID to IdentifierInfo*
- IIDMap[idx].II = I->first;
-
- // Store the reverse mapping in a hashtable.
- IIOffMap.insert(&IIDMap[idx], I->second);
- }
-
- // Write out the inverse map first. This causes the PCIDKey entries to
- // record PTH file offsets for the string data. This is used to write
- // the second table.
- Offset StringTableOffset = IIOffMap.Emit(Out);
-
- // Now emit the table mapping from persistent IDs to PTH file offsets.
- Offset IDOff = Out.tell();
- Emit32(idcount); // Emit the number of identifiers.
- for (unsigned i = 0 ; i < idcount; ++i)
- Emit32(IIDMap[i].FileOffset);
-
- // Finally, release the inverse map.
- free(IIDMap);
-
- return std::make_pair(IDOff, StringTableOffset);
-}
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index 4e8eb32121dc..1bfc25c4c778 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -129,7 +129,6 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
CInvok->getPreprocessorOpts().ChainedIncludes.clear();
CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
- CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
CInvok->getPreprocessorOpts().DisablePCHValidation = true;
CInvok->getPreprocessorOpts().Includes.clear();
CInvok->getPreprocessorOpts().MacroIncludes.clear();
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index ecb09da3c1ef..f66674535423 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -30,7 +30,6 @@
#include "clang/Frontend/Utils.h"
#include "clang/Frontend/VerifyDiagnosticConsumer.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/CodeCompleteConsumer.h"
@@ -38,6 +37,7 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
@@ -177,18 +177,18 @@ static void collectIncludePCH(CompilerInstance &CI,
std::error_code EC;
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
SimpleASTReaderListener Validator(CI.getPreprocessor());
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an AST file. ASTReader::isAcceptableASTFile is not
// used here since we're not interested in validating the PCH at this time,
// but only to check whether this is a file containing an AST.
if (!ASTReader::readASTFileControlBlock(
- Dir->getName(), FileMgr, CI.getPCHContainerReader(),
+ Dir->path(), FileMgr, CI.getPCHContainerReader(),
/*FindModuleFileExtensions=*/false, Validator,
/*ValidateDiagnosticOptions=*/false))
- MDC->addFile(Dir->getName());
+ MDC->addFile(Dir->path());
}
}
@@ -198,14 +198,14 @@ static void collectVFSEntries(CompilerInstance &CI,
return;
// Collect all VFS found.
- SmallVector<vfs::YAMLVFSEntry, 16> VFSEntries;
+ SmallVector<llvm::vfs::YAMLVFSEntry, 16> VFSEntries;
for (const std::string &VFSFile : CI.getHeaderSearchOpts().VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
llvm::MemoryBuffer::getFile(VFSFile);
if (!Buffer)
return;
- vfs::collectVFSFromYAML(std::move(Buffer.get()), /*DiagHandler*/ nullptr,
- VFSFile, VFSEntries);
+ llvm::vfs::collectVFSFromYAML(std::move(Buffer.get()),
+ /*DiagHandler*/ nullptr, VFSFile, VFSEntries);
}
for (auto &E : VFSEntries)
@@ -303,7 +303,7 @@ CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
FileManager *CompilerInstance::createFileManager() {
if (!hasVirtualFileSystem()) {
- IntrusiveRefCntPtr<vfs::FileSystem> VFS =
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
createVFSFromCompilerInvocation(getInvocation(), getDiagnostics());
setVirtualFileSystem(VFS);
}
@@ -372,10 +372,8 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
- // Create a PTH manager if we are using some form of a token cache.
- PTHManager *PTHMgr = nullptr;
- if (!PPOpts.TokenCache.empty())
- PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
+ // The module manager holds a reference to the old preprocessor (if any).
+ ModuleManager.reset();
// Create the Preprocessor.
HeaderSearch *HeaderInfo =
@@ -383,19 +381,12 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
getDiagnostics(), getLangOpts(), &getTarget());
PP = std::make_shared<Preprocessor>(
Invocation->getPreprocessorOptsPtr(), getDiagnostics(), getLangOpts(),
- getSourceManager(), getPCMCache(), *HeaderInfo, *this, PTHMgr,
+ getSourceManager(), getPCMCache(), *HeaderInfo, *this,
+ /*IdentifierInfoLookup=*/nullptr,
/*OwnsHeaderSearch=*/true, TUKind);
getTarget().adjust(getLangOpts());
PP->Initialize(getTarget(), getAuxTarget());
- // Note that this is different then passing PTHMgr to Preprocessor's ctor.
- // That argument is used as the IdentifierInfoLookup argument to
- // IdentifierTable's ctor.
- if (PTHMgr) {
- PTHMgr->setPreprocessor(&*PP);
- PP->setPTHManager(PTHMgr);
- }
-
if (PPOpts.DetailedRecord)
PP->createPreprocessingRecord();
@@ -911,6 +902,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// taking it as an input instead of hard-coding llvm::errs.
raw_ostream &OS = llvm::errs();
+ if (!Act.PrepareToExecute(*this))
+ return false;
+
// Create the target instance.
setTarget(TargetInfo::CreateTargetInfo(getDiagnostics(),
getInvocation().TargetOpts));
@@ -921,7 +915,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if ((getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) &&
!getFrontendOpts().AuxTriple.empty()) {
auto TO = std::make_shared<TargetOptions>();
- TO->Triple = getFrontendOpts().AuxTriple;
+ TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
TO->HostTriple = getTarget().getTriple().str();
setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
}
@@ -1021,7 +1015,7 @@ static InputKind::Language getLanguageFromOptions(const LangOptions &LangOpts) {
return InputKind::OpenCL;
if (LangOpts.CUDA)
return InputKind::CUDA;
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
return LangOpts.CPlusPlus ? InputKind::ObjCXX : InputKind::ObjC;
return LangOpts.CPlusPlus ? InputKind::CXX : InputKind::C;
}
@@ -1266,7 +1260,7 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance,
<< Module->Name << Locked.getErrorMessage();
// Clear out any potential leftover.
Locked.unsafeRemoveLockFile();
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case llvm::LockFileManager::LFS_Owned:
// We're responsible for building the module ourselves.
if (!compileModuleImpl(ImportingInstance, ModuleNameLoc, Module,
@@ -1615,22 +1609,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
Module::NameVisibilityKind Visibility,
bool IsInclusionDirective) {
// Determine what file we're searching from.
- // FIXME: Should we be deciding whether this is a submodule (here and
- // below) based on -fmodules-ts or should we pass a flag and make the
- // caller decide?
- std::string ModuleName;
- if (getLangOpts().ModulesTS) {
- // FIXME: Same code as Sema::ActOnModuleDecl() so there is probably a
- // better place/way to do this.
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
- }
- }
- else
- ModuleName = Path[0].first->getName();
-
+ StringRef ModuleName = Path[0].first->getName();
SourceLocation ModuleNameLoc = Path[0].second;
// If we've already handled this import, just return the cached result.
@@ -1736,7 +1715,9 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// module cache, we don't know how to rebuild modules.
unsigned ARRFlags = Source == ModuleCache ?
ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing :
- ASTReader::ARR_ConfigurationMismatch;
+ Source == PrebuiltModulePath ?
+ 0 :
+ ASTReader::ARR_ConfigurationMismatch;
switch (ModuleManager->ReadAST(ModuleFileName,
Source == PrebuiltModulePath
? serialization::MK_PrebuiltModule
@@ -1859,7 +1840,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Verify that the rest of the module path actually corresponds to
// a submodule.
bool MapPrivateSubModToTopLevel = false;
- if (!getLangOpts().ModulesTS && Path.size() > 1) {
+ if (Path.size() > 1) {
for (unsigned I = 1, N = Path.size(); I != N; ++I) {
StringRef Name = Path[I].first->getName();
clang::Module *Sub = Module->findSubmodule(Name);
@@ -2139,7 +2120,7 @@ CompilerInstance::lookupMissingImports(StringRef Name,
return false;
}
-void CompilerInstance::resetAndLeakSema() { BuryPointer(takeSema()); }
+void CompilerInstance::resetAndLeakSema() { llvm::BuryPointer(takeSema()); }
void CompilerInstance::setExternalSemaSource(
IntrusiveRefCntPtr<ExternalSemaSource> ESS) {
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 78e6babd0251..3e6528c25982 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -11,6 +11,7 @@
#include "TestModuleFileExtension.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Diagnostic.h"
@@ -23,17 +24,16 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Basic/Visibility.h"
#include "clang/Basic/XRayInstr.h"
#include "clang/Config/config.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/CommandLineSourceLoc.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/LangStandard.h"
#include "clang/Frontend/MigratorOptions.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
@@ -55,6 +55,7 @@
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Option/Arg.h"
@@ -76,6 +77,7 @@
#include "llvm/Support/Process.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
@@ -119,25 +121,25 @@ CompilerInvocationBase::~CompilerInvocationBase() = default;
static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
- unsigned DefaultOpt = 0;
+ unsigned DefaultOpt = llvm::CodeGenOpt::None;
if (IK.getLanguage() == InputKind::OpenCL && !Args.hasArg(OPT_cl_opt_disable))
- DefaultOpt = 2;
+ DefaultOpt = llvm::CodeGenOpt::Default;
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O0))
- return 0;
+ return llvm::CodeGenOpt::None;
if (A->getOption().matches(options::OPT_Ofast))
- return 3;
+ return llvm::CodeGenOpt::Aggressive;
assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
if (S == "s" || S == "z" || S.empty())
- return 2;
+ return llvm::CodeGenOpt::Default;
if (S == "g")
- return 1;
+ return llvm::CodeGenOpt::Less;
return getLastArgIntValue(Args, OPT_O, DefaultOpt, Diags);
}
@@ -180,6 +182,11 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
}
}
+// Parse the Static Analyzer configuration. If \p Diags is set to nullptr,
+// it won't verify the input.
+static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
+ DiagnosticsEngine *Diags);
+
static void getAllNoBuiltinFuncValues(ArgList &Args,
std::vector<std::string> &Funcs) {
SmallVector<const char *, 8> Values;
@@ -278,19 +285,24 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.ShowConfigOptionsList = Args.hasArg(OPT_analyzer_config_help);
Opts.ShowEnabledCheckerList = Args.hasArg(OPT_analyzer_list_enabled_checkers);
+ Opts.ShouldEmitErrorsOnInvalidConfigValue =
+ /* negated */!llvm::StringSwitch<bool>(
+ Args.getLastArgValue(OPT_analyzer_config_compatibility_mode))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(false);
Opts.DisableAllChecks = Args.hasArg(OPT_analyzer_disable_all_checks);
Opts.visualizeExplodedGraphWithGraphViz =
Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
- Opts.visualizeExplodedGraphWithUbiGraph =
- Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
+ Opts.DumpExplodedGraphTo = Args.getLastArgValue(OPT_analyzer_dump_egraph);
Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
Opts.AnalyzeNestedBlocks =
Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
- Opts.eagerlyAssumeBinOpBifurcation = Args.hasArg(OPT_analyzer_eagerly_assume);
Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
@@ -317,7 +329,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
// Go through the analyzer configuration options.
for (const auto *A : Args.filtered(OPT_analyzer_config)) {
- A->claim();
+
// We can have a list of comma separated config names, e.g:
// '-analyzer-config key1=val1,key2=val2'
StringRef configList = A->getValue();
@@ -339,10 +351,25 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Success = false;
break;
}
+
+ // TODO: Check checker options too, possibly in CheckerRegistry.
+ // Leave unknown non-checker configs unclaimed.
+ if (!key.contains(":") && Opts.isUnknownAnalyzerConfig(key)) {
+ if (Opts.ShouldEmitErrorsOnInvalidConfigValue)
+ Diags.Report(diag::err_analyzer_config_unknown) << key;
+ continue;
+ }
+
+ A->claim();
Opts.Config[key] = val;
}
}
+ if (Opts.ShouldEmitErrorsOnInvalidConfigValue)
+ parseAnalyzerConfigs(Opts, &Diags);
+ else
+ parseAnalyzerConfigs(Opts, nullptr);
+
llvm::raw_string_ostream os(Opts.FullCompilerInvocation);
for (unsigned i = 0; i < Args.getNumInputArgStrings(); ++i) {
if (i != 0)
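
The -analyzer-config loop above consumes the comma-separated "key1=val1,key2=val2" form described in its comment. A minimal stand-alone sketch of that splitting with LLVM's StringRef (the function and variable names here are illustrative, not taken from the patch):

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <utility>

// Split "key1=val1,key2=val2" into (key, value) pairs; validation of the
// individual entries is left to the caller, as in ParseAnalyzerArgs.
static void splitAnalyzerConfigList(
    llvm::StringRef ConfigList,
    llvm::SmallVectorImpl<std::pair<llvm::StringRef, llvm::StringRef>> &Out) {
  llvm::SmallVector<llvm::StringRef, 4> Entries;
  ConfigList.split(Entries, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
  for (llvm::StringRef Entry : Entries)
    Out.push_back(Entry.split('=')); // "key1=val1" -> ("key1", "val1")
}
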
@@ -354,6 +381,91 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
return Success;
}
+static StringRef getStringOption(AnalyzerOptions::ConfigTable &Config,
+ StringRef OptionName, StringRef DefaultVal) {
+ return Config.insert({OptionName, DefaultVal}).first->second;
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ StringRef &OptionField, StringRef Name,
+ StringRef DefaultVal) {
+  // String options may be known to be invalid (e.g. if the expected string is
+  // a file name, but the file does not exist); those will have to be checked
+  // in parseConfigs.
+ OptionField = getStringOption(Config, Name, DefaultVal);
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ bool &OptionField, StringRef Name, bool DefaultVal) {
+ auto PossiblyInvalidVal = llvm::StringSwitch<Optional<bool>>(
+ getStringOption(Config, Name, (DefaultVal ? "true" : "false")))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(None);
+
+ if (!PossiblyInvalidVal) {
+ if (Diags)
+ Diags->Report(diag::err_analyzer_config_invalid_input)
+ << Name << "a boolean";
+ else
+ OptionField = DefaultVal;
+ } else
+ OptionField = PossiblyInvalidVal.getValue();
+}
+
+static void initOption(AnalyzerOptions::ConfigTable &Config,
+ DiagnosticsEngine *Diags,
+ unsigned &OptionField, StringRef Name,
+ unsigned DefaultVal) {
+
+ OptionField = DefaultVal;
+ bool HasFailed = getStringOption(Config, Name, std::to_string(DefaultVal))
+ .getAsInteger(10, OptionField);
+ if (Diags && HasFailed)
+ Diags->Report(diag::err_analyzer_config_invalid_input)
+ << Name << "an unsigned";
+}
+
+static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
+ DiagnosticsEngine *Diags) {
+  // TODO: There's no need to store the entire config table; storing only the
+  // checker options would be enough.
+
+#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEFAULT_VAL);
+
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ switch (AnOpts.getUserMode()) { \
+ case UMK_Shallow: \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, SHALLOW_VAL); \
+ break; \
+ case UMK_Deep: \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEEP_VAL); \
+ break; \
+ } \
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+#undef ANALYZER_OPTION
+#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
+
+ // At this point, AnalyzerOptions is configured. Let's validate some options.
+
+ if (!Diags)
+ return;
+
+ if (!AnOpts.CTUDir.empty() && !llvm::sys::fs::is_directory(AnOpts.CTUDir))
+ Diags->Report(diag::err_analyzer_config_invalid_input) << "ctu-dir"
+ << "a filename";
+
+ if (!AnOpts.ModelPath.empty() &&
+ !llvm::sys::fs::is_directory(AnOpts.ModelPath))
+ Diags->Report(diag::err_analyzer_config_invalid_input) << "model-path"
+ << "a filename";
+}
+
static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
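
For readers unfamiliar with the .def trick used by parseAnalyzerConfigs above: every ANALYZER_OPTION line in AnalyzerOptions.def is an X-macro entry, and defining the macro immediately before the #include turns each entry into one initOption call. A minimal sketch of the same pattern with a made-up option file and made-up names (nothing below comes from the real AnalyzerOptions.def):

// DemoOptions.def (hypothetical):
//   DEMO_OPTION(bool, ShouldDoFoo, "do-foo", false)
//   DEMO_OPTION(unsigned, MaxWidgets, "max-widgets", 8)

struct DemoOpts {
  bool ShouldDoFoo;
  unsigned MaxWidgets;
};

static void initDemoOpts(DemoOpts &O) {
#define DEMO_OPTION(TYPE, NAME, CMDFLAG, DEFAULT_VAL) O.NAME = DEFAULT_VAL;
#include "DemoOptions.def" // expands to: O.ShouldDoFoo = false; O.MaxWidgets = 8;
#undef DEMO_OPTION
}
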
@@ -369,7 +481,7 @@ static StringRef getCodeModel(ArgList &Args, DiagnosticsEngine &Diags) {
if (Arg *A = Args.getLastArg(OPT_mcode_model)) {
StringRef Value = A->getValue();
if (Value == "small" || Value == "kernel" || Value == "medium" ||
- Value == "large")
+ Value == "large" || Value == "tiny")
return Value;
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Value;
}
@@ -568,6 +680,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
unsigned Val =
llvm::StringSwitch<unsigned>(A->getValue())
.Case("line-tables-only", codegenoptions::DebugLineTablesOnly)
+ .Case("line-directives-only", codegenoptions::DebugDirectivesOnly)
.Case("limited", codegenoptions::LimitedDebugInfo)
.Case("standalone", codegenoptions::FullDebugInfo)
.Default(~0U);
@@ -592,12 +705,29 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
Opts.EmitCodeView = Args.hasArg(OPT_gcodeview);
+ Opts.CodeViewGHash = Args.hasArg(OPT_gcodeview_ghash);
Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
- Opts.EnableSplitDwarf = Args.hasArg(OPT_enable_split_dwarf);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
+
+ if (Arg *A =
+ Args.getLastArg(OPT_enable_split_dwarf, OPT_enable_split_dwarf_EQ)) {
+ if (A->getOption().matches(options::OPT_enable_split_dwarf)) {
+ Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
+ } else {
+ StringRef Name = A->getValue();
+ if (Name == "single")
+ Opts.setSplitDwarfMode(CodeGenOptions::SingleFileFission);
+ else if (Name == "split")
+ Opts.setSplitDwarfMode(CodeGenOptions::SplitFileFission);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ }
+ }
+
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Args.hasArg(OPT_dwarf_explicit_import);
Opts.DebugFwdTemplateParams = Args.hasArg(OPT_debug_forward_template_params);
@@ -614,6 +744,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableLifetimeMarkers = Args.hasArg(OPT_disable_lifetimemarkers);
Opts.DisableO0ImplyOptNone = Args.hasArg(OPT_disable_O0_optnone);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
+ Opts.IndirectTlsSegRefs = Args.hasArg(OPT_mno_tls_direct_seg_refs);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
OPT_fuse_register_sized_bitfield_access);
@@ -625,6 +756,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_ffine_grained_bitfield_accesses,
OPT_fno_fine_grained_bitfield_accesses, false);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
+ Opts.RecordCommandLine = Args.getLastArgValue(OPT_record_command_line);
Opts.MergeAllConstants = Args.hasArg(OPT_fmerge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
@@ -643,7 +775,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ);
Opts.DebugInfoForProfiling = Args.hasFlag(
OPT_fdebug_info_for_profiling, OPT_fno_debug_info_for_profiling, false);
- Opts.GnuPubnames = Args.hasArg(OPT_ggnu_pubnames);
+ Opts.DebugNameTable = static_cast<unsigned>(
+ Args.hasArg(OPT_ggnu_pubnames)
+ ? llvm::DICompileUnit::DebugNameTableKind::GNU
+ : Args.hasArg(OPT_gpubnames)
+ ? llvm::DICompileUnit::DebugNameTableKind::Default
+ : llvm::DICompileUnit::DebugNameTableKind::None);
+ Opts.DebugRangesBaseAddress = Args.hasArg(OPT_fdebug_ranges_base_address);
setPGOInstrumentor(Opts, Args, Diags);
Opts.InstrProfileOutput =
@@ -652,6 +790,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.getLastArgValue(OPT_fprofile_instrument_use_path_EQ);
if (!Opts.ProfileInstrumentUsePath.empty())
setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
+ Opts.ProfileRemappingFile =
+ Args.getLastArgValue(OPT_fprofile_remapping_file_EQ);
+ if (!Opts.ProfileRemappingFile.empty() && !Opts.ExperimentalNewPassManager) {
+ Diags.Report(diag::err_drv_argument_only_allowed_with)
+ << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
+ << "-fexperimental-new-pass-manager";
+ }
Opts.CoverageMapping =
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
@@ -664,7 +809,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.RegisterGlobalDtorsWithAtExit =
Args.hasArg(OPT_fregister_global_dtors_with_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
- Opts.CodeModel = getCodeModel(Args, Diags);
+ Opts.CodeModel = TargetOpts.CodeModel;
Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
Opts.DisableFPElim =
(Args.hasArg(OPT_mdisable_fp_elim) || Args.hasArg(OPT_pg));
@@ -762,6 +907,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
}
Opts.LTOUnit = Args.hasFlag(OPT_flto_unit, OPT_fno_lto_unit, false);
+ Opts.EnableSplitLTOUnit = Args.hasArg(OPT_fsplit_lto_unit);
if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
if (IK.getLanguage() != InputKind::LLVM_IR)
Diags.Report(diag::err_drv_argument_only_allowed_with)
@@ -797,6 +943,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
Opts.CoverageNoFunctionNamesInData =
Args.hasArg(OPT_coverage_no_function_names_in_data);
+ Opts.ProfileFilterFiles =
+ Args.getLastArgValue(OPT_fprofile_filter_files_EQ);
+ Opts.ProfileExcludeFiles =
+ Args.getLastArgValue(OPT_fprofile_exclude_files_EQ);
Opts.CoverageExitBlockBeforeBody =
Args.hasArg(OPT_coverage_exit_block_before_body);
if (Args.hasArg(OPT_coverage_version_EQ)) {
@@ -844,7 +994,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
for (const auto &arg : ASL) {
StringRef ArgStr(arg);
Opts.CmdArgs.insert(Opts.CmdArgs.end(), ArgStr.begin(), ArgStr.end());
- // using \00 to seperate each commandline options.
+    // using \00 to separate each command line option.
Opts.CmdArgs.push_back('\0');
}
}
@@ -912,10 +1062,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
for (auto *A :
- Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_cuda_bitcode)) {
+ Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
F.Filename = A->getValue();
- if (A->getOption().matches(OPT_mlink_cuda_bitcode)) {
+ if (A->getOption().matches(OPT_mlink_builtin_bitcode)) {
F.LinkFlags = llvm::Linker::Flags::LinkOnlyNeeded;
// When linking CUDA bitcode, propagate function attributes so that
// e.g. libdevice gets fast-math attrs if we're building with fast-math.
@@ -955,11 +1105,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fsanitize_cfi_icall_generalize_pointers);
Opts.SanitizeStats = Args.hasArg(OPT_fsanitize_stats);
if (Arg *A = Args.getLastArg(
- OPT_fsanitize_address_poison_class_member_array_new_cookie,
- OPT_fno_sanitize_address_poison_class_member_array_new_cookie)) {
- Opts.SanitizeAddressPoisonClassMemberArrayNewCookie =
+ OPT_fsanitize_address_poison_custom_array_cookie,
+ OPT_fno_sanitize_address_poison_custom_array_cookie)) {
+ Opts.SanitizeAddressPoisonCustomArrayCookie =
A->getOption().getID() ==
- OPT_fsanitize_address_poison_class_member_array_new_cookie;
+ OPT_fsanitize_address_poison_custom_array_cookie;
}
if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_after_scope,
OPT_fno_sanitize_address_use_after_scope)) {
@@ -968,6 +1118,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.SanitizeAddressGlobalsDeadStripping =
Args.hasArg(OPT_fsanitize_address_globals_dead_stripping);
+ if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_odr_indicator,
+ OPT_fno_sanitize_address_use_odr_indicator)) {
+ Opts.SanitizeAddressUseOdrIndicator =
+ A->getOption().getID() == OPT_fsanitize_address_use_odr_indicator;
+ }
Opts.SSPBufferSize =
getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
@@ -1003,6 +1158,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
}
+
+ if (Args.hasArg(OPT_fno_objc_convert_messages_to_runtime_calls))
+ Opts.ObjCConvertMessagesToRuntimeCalls = 0;
+
if (Args.getLastArg(OPT_femulated_tls) ||
Args.getLastArg(OPT_fno_emulated_tls)) {
Opts.ExplicitEmulatedTLS = true;
@@ -1125,6 +1284,44 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.Addrsig = Args.hasArg(OPT_faddrsig);
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
+ StringRef SignScope = A->getValue();
+
+ if (SignScope.equals_lower("none"))
+ Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::None);
+ else if (SignScope.equals_lower("all"))
+ Opts.setSignReturnAddress(CodeGenOptions::SignReturnAddressScope::All);
+ else if (SignScope.equals_lower("non-leaf"))
+ Opts.setSignReturnAddress(
+ CodeGenOptions::SignReturnAddressScope::NonLeaf);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignScope;
+
+ if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
+ StringRef SignKey = A->getValue();
+ if (!SignScope.empty() && !SignKey.empty()) {
+ if (SignKey.equals_lower("a_key"))
+ Opts.setSignReturnAddressKey(
+ CodeGenOptions::SignReturnAddressKeyValue::AKey);
+ else if (SignKey.equals_lower("b_key"))
+ Opts.setSignReturnAddressKey(
+ CodeGenOptions::SignReturnAddressKeyValue::BKey);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << SignKey;
+ }
+ }
+ }
+
+ Opts.BranchTargetEnforcement = Args.hasArg(OPT_mbranch_target_enforce);
+
+ Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
+
+ Opts.SpeculativeLoadHardening = Args.hasArg(OPT_mspeculative_load_hardening);
+
+ Opts.DefaultFunctionAttrs = Args.getAllArgValues(OPT_default_function_attr);
+
return Success;
}
@@ -1316,7 +1513,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Success = false;
}
else
- llvm::sort(Opts.VerifyPrefixes.begin(), Opts.VerifyPrefixes.end());
+ llvm::sort(Opts.VerifyPrefixes);
DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
@@ -1425,17 +1622,17 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::EmitObj; break;
case OPT_fixit_EQ:
Opts.FixItSuffix = A->getValue();
- // fall-through!
+ LLVM_FALLTHROUGH;
case OPT_fixit:
Opts.ProgramAction = frontend::FixIt; break;
case OPT_emit_module:
Opts.ProgramAction = frontend::GenerateModule; break;
case OPT_emit_module_interface:
Opts.ProgramAction = frontend::GenerateModuleInterface; break;
+ case OPT_emit_header_module:
+ Opts.ProgramAction = frontend::GenerateHeaderModule; break;
case OPT_emit_pch:
Opts.ProgramAction = frontend::GeneratePCH; break;
- case OPT_emit_pth:
- Opts.ProgramAction = frontend::GeneratePTH; break;
case OPT_init_only:
Opts.ProgramAction = frontend::InitOnly; break;
case OPT_fsyntax_only:
@@ -1444,8 +1641,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::ModuleFileInfo; break;
case OPT_verify_pch:
Opts.ProgramAction = frontend::VerifyPCH; break;
- case OPT_print_decl_contexts:
- Opts.ProgramAction = frontend::PrintDeclContext; break;
case OPT_print_preamble:
Opts.ProgramAction = frontend::PrintPreamble; break;
case OPT_E:
@@ -1550,8 +1745,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.OverrideRecordLayoutsFile
= Args.getLastArgValue(OPT_foverride_record_layout_EQ);
- Opts.AuxTriple =
- llvm::Triple::normalize(Args.getLastArgValue(OPT_aux_triple));
+ Opts.AuxTriple = Args.getLastArgValue(OPT_aux_triple);
Opts.StatsFile = Args.getLastArgValue(OPT_stats_file);
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
@@ -1867,7 +2061,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
if (IK.getLanguage() == InputKind::Asm) {
Opts.AsmPreprocessor = 1;
} else if (IK.isObjectiveC()) {
- Opts.ObjC1 = Opts.ObjC2 = 1;
+ Opts.ObjC = 1;
}
if (LangStd == LangStandard::lang_unspecified) {
@@ -2130,6 +2324,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
}
}
+ if (Args.hasArg(OPT_fno_dllexport_inlines))
+ Opts.DllExportInlines = false;
+
if (const Arg *A = Args.getLastArg(OPT_fcf_protection_EQ)) {
StringRef Name = A->getValue();
if (Name == "full" || Name == "branch") {
@@ -2196,9 +2393,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Opts.CUDAIsDevice && Args.hasArg(OPT_fcuda_approx_transcendentals))
Opts.CUDADeviceApproxTranscendentals = 1;
- Opts.CUDARelocatableDeviceCode = Args.hasArg(OPT_fcuda_rdc);
+ Opts.GPURelocatableDeviceCode = Args.hasArg(OPT_fgpu_rdc);
- if (Opts.ObjC1) {
+ if (Opts.ObjC) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
StringRef value = arg->getValue();
if (Opts.ObjCRuntime.tryParse(value))
@@ -2265,8 +2462,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_print_ivar_layout))
Opts.ObjCGCBitmapPrint = 1;
+
if (Args.hasArg(OPT_fno_constant_cfstrings))
Opts.NoConstantCFStrings = 1;
+ if (const auto *A = Args.getLastArg(OPT_fcf_runtime_abi_EQ))
+ Opts.CFRuntime =
+ llvm::StringSwitch<LangOptions::CoreFoundationABI>(A->getValue())
+ .Cases("unspecified", "standalone", "objc",
+ LangOptions::CoreFoundationABI::ObjectiveC)
+ .Cases("swift", "swift-5.0",
+ LangOptions::CoreFoundationABI::Swift5_0)
+ .Case("swift-4.2", LangOptions::CoreFoundationABI::Swift4_2)
+ .Case("swift-4.1", LangOptions::CoreFoundationABI::Swift4_1)
+ .Default(LangOptions::CoreFoundationABI::ObjectiveC);
if (Args.hasArg(OPT_fzvector))
Opts.ZVector = 1;
@@ -2291,6 +2499,9 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fvisibility_inlines_hidden))
Opts.InlineVisibilityHidden = 1;
+ if (Args.hasArg(OPT_fvisibility_global_new_delete_hidden))
+ Opts.GlobalAllocationFunctionVisibilityHidden = 1;
+
if (Args.hasArg(OPT_ftrapv)) {
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
@@ -2314,7 +2525,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
VT.getSubminor().getValueOr(0);
}
- // Mimicing gcc's behavior, trigraphs are only enabled if -trigraphs
+ // Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
// is specified, or -std is set to a conforming mode.
// Trigraphs are disabled by default in c++1z onwards.
Opts.Trigraphs = !Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17;
@@ -2395,7 +2606,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules);
Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
- Opts.Char8 = Args.hasArg(OPT_fchar8__t);
+ Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus2a);
if (const Arg *A = Args.getLastArg(OPT_fwchar_type_EQ)) {
Opts.WCharSize = llvm::StringSwitch<unsigned>(A->getValue())
.Case("char", 1)
@@ -2478,7 +2689,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.CurrentModule = Opts.ModuleName;
Opts.AppExt = Args.hasArg(OPT_fapplication_extension);
Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature);
- llvm::sort(Opts.ModuleFeatures.begin(), Opts.ModuleFeatures.end());
+ llvm::sort(Opts.ModuleFeatures);
Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type);
Opts.NativeHalfArgsAndReturns |= Args.hasArg(OPT_fnative_half_arguments_and_returns);
// Enable HalfArgsAndReturns if present in Args or if NativeHalfArgsAndReturns
@@ -2627,6 +2838,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
+ if (Opts.OpenMPIsDevice && T.isNVPTX()) {
+ Opts.OpenMPCUDANumSMs =
+ getLastArgIntValue(Args, options::OPT_fopenmp_cuda_number_of_sm_EQ,
+ Opts.OpenMPCUDANumSMs, Diags);
+ Opts.OpenMPCUDABlocksPerSM =
+ getLastArgIntValue(Args, options::OPT_fopenmp_cuda_blocks_per_sm_EQ,
+ Opts.OpenMPCUDABlocksPerSM, Diags);
+ }
+
+ // Prevent auto-widening the representation of loop counters during an
+ // OpenMP collapse clause.
+ Opts.OpenMPOptimisticCollapse =
+ Args.hasArg(options::OPT_fopenmp_optimistic_collapse) ? 1 : 0;
// Get the OpenMP target triples if any.
if (Arg *A = Args.getLastArg(options::OPT_fopenmp_targets_EQ)) {
@@ -2657,10 +2881,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
<< Opts.OMPHostIRFile;
}
- // set CUDA mode for OpenMP target NVPTX if specified in options
+ // Set CUDA mode for OpenMP target NVPTX if specified in options
Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && T.isNVPTX() &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
+  // Force the full CUDA runtime for OpenMP target NVPTX if specified in options
+ Opts.OpenMPCUDAForceFullRuntime =
+ Opts.OpenMPIsDevice && T.isNVPTX() &&
+ Args.hasArg(options::OPT_fopenmp_cuda_force_full_runtime);
+
// Record whether the __DEPRECATED define was requested.
Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
OPT_fno_deprecated_macro,
@@ -2718,6 +2947,19 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
case 3: Opts.setStackProtector(LangOptions::SSPReq); break;
}
+ if (Arg *A = Args.getLastArg(OPT_ftrivial_auto_var_init)) {
+ StringRef Val = A->getValue();
+ if (Val == "uninitialized")
+ Opts.setTrivialAutoVarInit(
+ LangOptions::TrivialAutoVarInitKind::Uninitialized);
+ else if (Val == "zero")
+ Opts.setTrivialAutoVarInit(LangOptions::TrivialAutoVarInitKind::Zero);
+ else if (Val == "pattern")
+ Opts.setTrivialAutoVarInit(LangOptions::TrivialAutoVarInitKind::Pattern);
+ else
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
// Parse -fsanitize= arguments.
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, Opts.Sanitize);
@@ -2753,6 +2995,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// -fallow-editor-placeholders
Opts.AllowEditorPlaceholders = Args.hasArg(OPT_fallow_editor_placeholders);
+ Opts.RegisterStaticDestructors = !Args.hasArg(OPT_fno_cxx_static_destructors);
+
if (Arg *A = Args.getLastArg(OPT_fclang_abi_compat_EQ)) {
Opts.setClangABICompat(LangOptions::ClangABI::Latest);
@@ -2776,6 +3020,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.setClangABICompat(LangOptions::ClangABI::Ver4);
else if (Major <= 6)
Opts.setClangABICompat(LangOptions::ClangABI::Ver6);
+ else if (Major <= 7)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver7);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -2802,13 +3048,12 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::FixIt:
case frontend::GenerateModule:
case frontend::GenerateModuleInterface:
+ case frontend::GenerateHeaderModule:
case frontend::GeneratePCH:
- case frontend::GeneratePTH:
case frontend::ParseSyntaxOnly:
case frontend::ModuleFileInfo:
case frontend::VerifyPCH:
case frontend::PluginAction:
- case frontend::PrintDeclContext:
case frontend::RewriteObjC:
case frontend::RewriteTest:
case frontend::RunAnalysis:
@@ -2833,12 +3078,10 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
- Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
+ Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
+ Args.hasArg(OPT_pch_through_hdrstop_use);
+ Opts.PCHWithHdrStopCreate = Args.hasArg(OPT_pch_through_hdrstop_create);
Opts.PCHThroughHeader = Args.getLastArgValue(OPT_pch_through_header_EQ);
- if (const Arg *A = Args.getLastArg(OPT_token_cache))
- Opts.TokenCache = A->getValue();
- else
- Opts.TokenCache = Opts.ImplicitPTHInclude;
Opts.UsePredefines = !Args.hasArg(OPT_undef);
Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
@@ -2943,6 +3186,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
+ Opts.CodeModel = getCodeModel(Args, Diags);
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
if (Arg *A = Args.getLastArg(OPT_meabi)) {
StringRef Value = A->getValue();
@@ -2971,6 +3215,14 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
Opts.ForceEnableInt128 = Args.hasArg(OPT_fforce_enable_int128);
Opts.NVPTXUseShortPointers = Args.hasFlag(
options::OPT_fcuda_short_ptr, options::OPT_fno_cuda_short_ptr, false);
+ if (Arg *A = Args.getLastArg(options::OPT_target_sdk_version_EQ)) {
+ llvm::VersionTuple Version;
+ if (Version.tryParse(A->getValue()))
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ Opts.SDKVersion = Version;
+ }
}
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
@@ -3023,6 +3275,7 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getTargetOpts(), Res.getFrontendOpts());
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args,
Res.getFileSystemOpts().WorkingDir);
+ llvm::Triple T(Res.getTargetOpts().Triple);
if (DashX.getFormat() == InputKind::Precompiled ||
DashX.getLanguage() == InputKind::LLVM_IR) {
// ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
@@ -3037,12 +3290,18 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, LangOpts.Sanitize);
} else {
- // Other LangOpts are only initialzed when the input is not AST or LLVM IR.
+ // Other LangOpts are only initialized when the input is not AST or LLVM IR.
// FIXME: Should we really be calling this for an InputKind::Asm input?
ParseLangArgs(LangOpts, Args, DashX, Res.getTargetOpts(),
Res.getPreprocessorOpts(), Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
LangOpts.ObjCExceptions = 1;
+ if (T.isOSDarwin() && DashX.isPreprocessed()) {
+      // Suppress the Darwin-specific 'stdlibcxx-not-found' diagnostic for
+ // preprocessed input as we don't expect it to be used with -std=libc++
+ // anyway.
+ Res.getDiagnosticOpts().Warnings.push_back("no-stdlibcxx-not-found");
+ }
}
LangOpts.FunctionAlignment =
@@ -3064,7 +3323,9 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
// names.
Res.getCodeGenOpts().DiscardValueNames &=
!LangOpts.Sanitize.has(SanitizerKind::Address) &&
- !LangOpts.Sanitize.has(SanitizerKind::Memory);
+ !LangOpts.Sanitize.has(SanitizerKind::KernelAddress) &&
+ !LangOpts.Sanitize.has(SanitizerKind::Memory) &&
+ !LangOpts.Sanitize.has(SanitizerKind::KernelMemory);
ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, Diags,
Res.getFrontendOpts().ProgramAction);
@@ -3072,7 +3333,6 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Res.getFrontendOpts().ProgramAction);
// Turn on -Wspir-compat for SPIR target.
- llvm::Triple T(Res.getTargetOpts().Triple);
auto Arch = T.getArch();
if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64) {
Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
@@ -3156,6 +3416,12 @@ std::string CompilerInvocation::getModuleHash() const {
code = ext->hashExtension(code);
}
+ // When compiling with -gmodules, also hash -fdebug-prefix-map as it
+ // affects the debug info in the PCM.
+ if (getCodeGenOpts().DebugTypeExtRefs)
+ for (const auto &KeyValue : getCodeGenOpts().DebugPrefixMap)
+ code = hash_combine(code, KeyValue.first, KeyValue.second);
+
// Extend the signature with the enabled sanitizers, if at least one is
// enabled. Sanitizers which cannot affect AST generation aren't hashed.
SanitizerSet SanHash = LangOpts->Sanitize;
@@ -3195,53 +3461,40 @@ uint64_t getLastArgUInt64Value(const ArgList &Args, OptSpecifier Id,
return getLastArgIntValueImpl<uint64_t>(Args, Id, Default, Diags);
}
-void BuryPointer(const void *Ptr) {
- // This function may be called only a small fixed amount of times per each
- // invocation, otherwise we do actually have a leak which we want to report.
- // If this function is called more than kGraveYardMaxSize times, the pointers
- // will not be properly buried and a leak detector will report a leak, which
- // is what we want in such case.
- static const size_t kGraveYardMaxSize = 16;
- LLVM_ATTRIBUTE_UNUSED static const void *GraveYard[kGraveYardMaxSize];
- static std::atomic<unsigned> GraveYardSize;
- unsigned Idx = GraveYardSize++;
- if (Idx >= kGraveYardMaxSize)
- return;
- GraveYard[Idx] = Ptr;
-}
-
-IntrusiveRefCntPtr<vfs::FileSystem>
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
createVFSFromCompilerInvocation(const CompilerInvocation &CI,
DiagnosticsEngine &Diags) {
- return createVFSFromCompilerInvocation(CI, Diags, vfs::getRealFileSystem());
+ return createVFSFromCompilerInvocation(CI, Diags,
+ llvm::vfs::getRealFileSystem());
}
-IntrusiveRefCntPtr<vfs::FileSystem>
-createVFSFromCompilerInvocation(const CompilerInvocation &CI,
- DiagnosticsEngine &Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS) {
+IntrusiveRefCntPtr<llvm::vfs::FileSystem> createVFSFromCompilerInvocation(
+ const CompilerInvocation &CI, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
return BaseFS;
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> Overlay(
- new vfs::OverlayFileSystem(BaseFS));
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> Result = BaseFS;
// earlier vfs files are on the bottom
for (const auto &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
- BaseFS->getBufferForFile(File);
+ Result->getBufferForFile(File);
if (!Buffer) {
Diags.Report(diag::err_missing_vfs_overlay_file) << File;
continue;
}
- IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getVFSFromYAML(
- std::move(Buffer.get()), /*DiagHandler*/ nullptr, File);
- if (FS)
- Overlay->pushOverlay(FS);
- else
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS = llvm::vfs::getVFSFromYAML(
+ std::move(Buffer.get()), /*DiagHandler*/ nullptr, File,
+ /*DiagContext*/ nullptr, Result);
+ if (!FS) {
Diags.Report(diag::err_invalid_vfs_overlay) << File;
+ continue;
+ }
+
+ Result = FS;
}
- return Overlay;
+ return Result;
}
} // namespace clang
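
The rewritten createVFSFromCompilerInvocation above chains each -ivfsoverlay YAML file onto the filesystem built so far, so later overlays shadow earlier ones and an overlay description may itself live inside a previously mounted overlay. A hedged usage sketch, assuming an already-populated CompilerInvocation CI and DiagnosticsEngine Diags (the looked-up path is purely illustrative):

#include "clang/Frontend/Utils.h"
#include "llvm/Support/VirtualFileSystem.h"

llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
    clang::createVFSFromCompilerInvocation(CI, Diags);
// Reads walk the whole overlay stack and bottom out in the real filesystem.
auto Buffer = FS->getBufferForFile("/path/mapped/by/an/overlay/header.h");
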
diff --git a/lib/Frontend/CreateInvocationFromCommandLine.cpp b/lib/Frontend/CreateInvocationFromCommandLine.cpp
index c3ce7ce2b742..2d4c40f8b9f1 100644
--- a/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -32,7 +32,7 @@ using namespace llvm::opt;
/// argument vector.
std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
ArrayRef<const char *> ArgList, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!Diags.get()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index e6e07190e1ff..a03d4b79c8b9 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -17,7 +17,6 @@
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/DirectoryLookup.h"
-#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
@@ -200,6 +199,10 @@ public:
const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
+ void HasInclude(SourceLocation Loc, StringRef SpelledFilename, bool IsAngled,
+ const FileEntry *File,
+ SrcMgr::CharacteristicKind FileType) override;
+
void EndOfMainFile() override {
OutputDependencyFile();
}
@@ -328,6 +331,17 @@ void DFGImpl::InclusionDirective(SourceLocation HashLoc,
}
}
+void DFGImpl::HasInclude(SourceLocation Loc, StringRef SpelledFilename,
+ bool IsAngled, const FileEntry *File,
+ SrcMgr::CharacteristicKind FileType) {
+ if (!File)
+ return;
+ StringRef Filename = File->getName();
+ if (!FileMatchesDepCriteria(Filename.data(), FileType))
+ return;
+ AddFilename(llvm::sys::path::remove_leading_dotslash(Filename));
+}
+
bool DFGImpl::AddFilename(StringRef Filename) {
if (FilesSet.insert(Filename).second) {
Files.push_back(Filename);
@@ -386,28 +400,32 @@ bool DFGImpl::AddFilename(StringRef Filename) {
/// for Windows file-naming info.
static void PrintFilename(raw_ostream &OS, StringRef Filename,
DependencyOutputFormat OutputFormat) {
+ // Convert filename to platform native path
+ llvm::SmallString<256> NativePath;
+ llvm::sys::path::native(Filename.str(), NativePath);
+
if (OutputFormat == DependencyOutputFormat::NMake) {
// Add quotes if needed. These are the characters listed as "special" to
// NMake, that are legal in a Windows filespec, and that could cause
// misinterpretation of the dependency string.
- if (Filename.find_first_of(" #${}^!") != StringRef::npos)
- OS << '\"' << Filename << '\"';
+ if (NativePath.find_first_of(" #${}^!") != StringRef::npos)
+ OS << '\"' << NativePath << '\"';
else
- OS << Filename;
+ OS << NativePath;
return;
}
assert(OutputFormat == DependencyOutputFormat::Make);
- for (unsigned i = 0, e = Filename.size(); i != e; ++i) {
- if (Filename[i] == '#') // Handle '#' the broken gcc way.
+ for (unsigned i = 0, e = NativePath.size(); i != e; ++i) {
+ if (NativePath[i] == '#') // Handle '#' the broken gcc way.
OS << '\\';
- else if (Filename[i] == ' ') { // Handle space correctly.
+ else if (NativePath[i] == ' ') { // Handle space correctly.
OS << '\\';
unsigned j = i;
- while (j > 0 && Filename[--j] == '\\')
+ while (j > 0 && NativePath[--j] == '\\')
OS << '\\';
- } else if (Filename[i] == '$') // $ is escaped by $$.
+ } else if (NativePath[i] == '$') // $ is escaped by $$.
OS << '$';
- OS << Filename[i];
+ OS << NativePath[i];
}
}
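
As a hand-worked example of the escaping above (on a POSIX host, where the native-path conversion is a no-op; this is not generated output):

//   input        : my dir/foo$bar#baz.h
//   Make output  : my\ dir/foo$$bar\#baz.h    (space, '$' and '#' escaped)
//   NMake output : "my dir/foo$bar#baz.h"     (quoted, since it contains
//                                              characters from " #${}^!")
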
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index 757ceec7ec9d..3bd86dc5beaa 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -337,8 +337,8 @@ static void computeCommonMacroArgExpansionFileIDs(
SmallVector<FileID, 4> EndArgExpansions;
getMacroArgExpansionFileIDs(Begin, BeginArgExpansions, /*IsBegin=*/true, SM);
getMacroArgExpansionFileIDs(End, EndArgExpansions, /*IsBegin=*/false, SM);
- llvm::sort(BeginArgExpansions.begin(), BeginArgExpansions.end());
- llvm::sort(EndArgExpansions.begin(), EndArgExpansions.end());
+ llvm::sort(BeginArgExpansions);
+ llvm::sort(EndArgExpansions);
std::set_intersection(BeginArgExpansions.begin(), BeginArgExpansions.end(),
EndArgExpansions.begin(), EndArgExpansions.end(),
std::back_inserter(CommonArgExpansions));
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 74550c410396..f5226380b4dd 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -26,6 +26,7 @@
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -151,6 +152,24 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
if (!Consumer)
return nullptr;
+ // Validate -add-plugin args.
+ bool FoundAllPlugins = true;
+ for (const std::string &Arg : CI.getFrontendOpts().AddPluginActions) {
+ bool Found = false;
+ for (FrontendPluginRegistry::iterator it = FrontendPluginRegistry::begin(),
+ ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() == Arg)
+ Found = true;
+ }
+ if (!Found) {
+ CI.getDiagnostics().Report(diag::err_fe_invalid_plugin_name) << Arg;
+ FoundAllPlugins = false;
+ }
+ }
+ if (!FoundAllPlugins)
+ return nullptr;
+
// If there are no registered plugins we don't need to wrap the consumer
if (FrontendPluginRegistry::begin() == FrontendPluginRegistry::end())
return Consumer;
@@ -276,7 +295,7 @@ static void addHeaderInclude(StringRef HeaderName,
bool IsExternC) {
if (IsExternC && LangOpts.CPlusPlus)
Includes += "extern \"C\" {\n";
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
Includes += "#import \"";
else
Includes += "#include \"";
@@ -342,17 +361,17 @@ static std::error_code collectModuleHeaderIncludes(
SmallString<128> DirNative;
llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
// headers.
- if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->getName()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
- .Default(false))
+ if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
continue;
- const FileEntry *Header = FileMgr.getFile(Dir->getName());
+ const FileEntry *Header = FileMgr.getFile(Dir->path());
// FIXME: This shouldn't happen unless there is a file system race. Is
// that worth diagnosing?
if (!Header)
@@ -365,7 +384,7 @@ static std::error_code collectModuleHeaderIncludes(
// Compute the relative path from the directory to this file.
SmallVector<StringRef, 16> Components;
- auto PathIt = llvm::sys::path::rbegin(Dir->getName());
+ auto PathIt = llvm::sys::path::rbegin(Dir->path());
for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
Components.push_back(*PathIt);
SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
@@ -523,7 +542,6 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
setCurrentInput(Input);
setCompilerInstance(&CI);
- StringRef InputFile = Input.getFile();
bool HasBegunSourceFile = false;
bool ReplayASTFile = Input.getKind().getFormat() == InputKind::Precompiled &&
usesPreprocessorOnly();
@@ -541,6 +559,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
&Diags->getDiagnosticOptions()));
ASTDiags->setClient(Diags->getClient(), /*OwnsClient*/false);
+ // FIXME: What if the input is a memory buffer?
+ StringRef InputFile = Input.getFile();
+
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
InputFile, CI.getPCHContainerReader(), ASTUnit::LoadPreprocessorOnly,
ASTDiags, CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
@@ -566,7 +587,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
auto &MM = ASTReader->getModuleManager();
auto &PrimaryModule = MM.getPrimaryModule();
- for (ModuleFile &MF : MM)
+ for (serialization::ModuleFile &MF : MM)
if (&MF != &PrimaryModule)
CI.getFrontendOpts().ModuleFiles.push_back(MF.FileName);
@@ -585,12 +606,12 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
assert(ASTModule && "module file does not define its own module");
Input = FrontendInputFile(ASTModule->PresumedModuleMapFile, Kind);
} else {
- auto &SM = CI.getSourceManager();
- FileID ID = SM.getMainFileID();
- if (auto *File = SM.getFileEntryForID(ID))
+ auto &OldSM = AST->getSourceManager();
+ FileID ID = OldSM.getMainFileID();
+ if (auto *File = OldSM.getFileEntryForID(ID))
Input = FrontendInputFile(File->getName(), Kind);
else
- Input = FrontendInputFile(SM.getBuffer(ID), Kind);
+ Input = FrontendInputFile(OldSM.getBuffer(ID), Kind);
}
setCurrentInput(Input, std::move(AST));
}
@@ -604,6 +625,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+ // FIXME: What if the input is a memory buffer?
+ StringRef InputFile = Input.getFile();
+
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
InputFile, CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
CI.getFileSystemOpts(), CI.getCodeGenOpts().DebugTypeExtRefs);
@@ -691,15 +715,16 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an acceptable AST file.
if (ASTReader::isAcceptableASTFile(
- Dir->getName(), FileMgr, CI.getPCHContainerReader(),
+ Dir->path(), FileMgr, CI.getPCHContainerReader(),
CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
SpecificModuleCachePath)) {
- PPOpts.ImplicitPCHInclude = Dir->getName();
+ PPOpts.ImplicitPCHInclude = Dir->path();
Found = true;
break;
}
@@ -791,7 +816,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// For preprocessed files, check if the first line specifies the original
// source file name with a linemarker.
- std::string PresumedInputFile = InputFile;
+ std::string PresumedInputFile = getCurrentFileOrBufferName();
if (Input.isPreprocessed())
ReadOriginalFileName(CI, PresumedInputFile);
@@ -943,7 +968,7 @@ void FrontendAction::EndSourceFile() {
if (DisableFree) {
CI.resetAndLeakSema();
CI.resetAndLeakASTContext();
- BuryPointer(CI.takeASTConsumer().get());
+ llvm::BuryPointer(CI.takeASTConsumer().get());
} else {
CI.setSema(nullptr);
CI.setASTContext(nullptr);
@@ -968,7 +993,7 @@ void FrontendAction::EndSourceFile() {
CI.resetAndLeakPreprocessor();
CI.resetAndLeakSourceManager();
CI.resetAndLeakFileManager();
- BuryPointer(CurrentASTUnit.release());
+ llvm::BuryPointer(std::move(CurrentASTUnit));
} else {
CI.setPreprocessor(nullptr);
CI.setSourceManager(nullptr);
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 8a8354c7d4c9..a407dfc162bb 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -92,12 +92,6 @@ ASTViewAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
}
std::unique_ptr<ASTConsumer>
-DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
- StringRef InFile) {
- return CreateDeclContextPrinter();
-}
-
-std::unique_ptr<ASTConsumer>
GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
std::string Sysroot;
if (!ComputeASTConsumerArguments(CI, /*ref*/ Sysroot))
@@ -242,6 +236,76 @@ GenerateModuleInterfaceAction::CreateOutputFile(CompilerInstance &CI,
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
+bool GenerateHeaderModuleAction::PrepareToExecuteAction(
+ CompilerInstance &CI) {
+ if (!CI.getLangOpts().Modules && !CI.getLangOpts().ModulesTS) {
+ CI.getDiagnostics().Report(diag::err_header_module_requires_modules);
+ return false;
+ }
+
+ auto &Inputs = CI.getFrontendOpts().Inputs;
+ if (Inputs.empty())
+ return GenerateModuleAction::BeginInvocation(CI);
+
+ auto Kind = Inputs[0].getKind();
+
+ // Convert the header file inputs into a single module input buffer.
+ SmallString<256> HeaderContents;
+ ModuleHeaders.reserve(Inputs.size());
+ for (const FrontendInputFile &FIF : Inputs) {
+ // FIXME: We should support re-compiling from an AST file.
+ if (FIF.getKind().getFormat() != InputKind::Source || !FIF.isFile()) {
+ CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
+ << (FIF.isFile() ? FIF.getFile()
+ : FIF.getBuffer()->getBufferIdentifier());
+ return true;
+ }
+
+ HeaderContents += "#include \"";
+ HeaderContents += FIF.getFile();
+ HeaderContents += "\"\n";
+ ModuleHeaders.push_back(FIF.getFile());
+ }
+ Buffer = llvm::MemoryBuffer::getMemBufferCopy(
+ HeaderContents, Module::getModuleInputBufferName());
+
+ // Set that buffer up as our "real" input.
+ Inputs.clear();
+ Inputs.push_back(FrontendInputFile(Buffer.get(), Kind, /*IsSystem*/false));
+
+ return GenerateModuleAction::PrepareToExecuteAction(CI);
+}
+
+bool GenerateHeaderModuleAction::BeginSourceFileAction(
+ CompilerInstance &CI) {
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_HeaderModule);
+
+ // Synthesize a Module object for the given headers.
+ auto &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ SmallVector<Module::Header, 16> Headers;
+ for (StringRef Name : ModuleHeaders) {
+ const DirectoryLookup *CurDir = nullptr;
+ const FileEntry *FE = HS.LookupFile(
+ Name, SourceLocation(), /*Angled*/ false, nullptr, CurDir,
+ None, nullptr, nullptr, nullptr, nullptr, nullptr);
+ if (!FE) {
+ CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
+ << Name;
+ continue;
+ }
+ Headers.push_back({Name, FE});
+ }
+ HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
+
+ return GenerateModuleAction::BeginSourceFileAction(CI);
+}
+
+std::unique_ptr<raw_pwrite_stream>
+GenerateHeaderModuleAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
+ return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
+}
+
SyntaxOnlyAction::~SyntaxOnlyAction() {
}
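
To make the flow of the new GenerateHeaderModuleAction above concrete, here is a hedged illustration; the invocation line and file names are assumptions based on the options this patch adds, not a verified command:

// clang -cc1 -emit-header-module -fmodules-ts -fmodule-name=mymod a.h b.h -o mymod.pcm
//
// PrepareToExecuteAction replaces the two header inputs with one in-memory
// buffer containing:
//   #include "a.h"
//   #include "b.h"
// BeginSourceFileAction then looks both headers up via HeaderSearch and
// synthesizes a header module named 'mymod' around them, before the usual
// GenerateModuleAction machinery writes out the .pcm.
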
@@ -341,6 +405,8 @@ private:
return "PriorTemplateArgumentSubstitution";
case CodeSynthesisContext::DefaultTemplateArgumentChecking:
return "DefaultTemplateArgumentChecking";
+ case CodeSynthesisContext::ExceptionSpecEvaluation:
+ return "ExceptionSpecEvaluation";
case CodeSynthesisContext::ExceptionSpecInstantiation:
return "ExceptionSpecInstantiation";
case CodeSynthesisContext::DeclaringSpecialMember:
@@ -599,6 +665,17 @@ namespace {
return true;
}
+
+ /// Returns true if this \c ASTReaderListener wants to receive the
+ /// imports of the AST file via \c visitImport, false otherwise.
+ bool needsImportVisitation() const override { return true; }
+
+ /// If needsImportVisitation returns \c true, this is called for each
+ /// AST file imported by this AST file.
+ void visitImport(StringRef ModuleName, StringRef Filename) override {
+ Out.indent(2) << "Imports module '" << ModuleName
+ << "': " << Filename.str() << "\n";
+ }
#undef DUMP_BOOLEAN
};
}
@@ -673,16 +750,6 @@ void DumpTokensAction::ExecuteAction() {
} while (Tok.isNot(tok::eof));
}
-void GeneratePTHAction::ExecuteAction() {
- CompilerInstance &CI = getCompilerInstance();
- std::unique_ptr<raw_pwrite_stream> OS =
- CI.createDefaultOutputFile(true, getCurrentFile());
- if (!OS)
- return;
-
- CacheTokens(CI.getPreprocessor(), OS.get());
-}
-
void PreprocessOnlyAction::ExecuteAction() {
Preprocessor &PP = getCompilerInstance().getPreprocessor();
@@ -742,7 +809,7 @@ void PrintPreprocessedAction::ExecuteAction() {
}
std::unique_ptr<raw_ostream> OS =
- CI.createDefaultOutputFile(BinaryMode, getCurrentFile());
+ CI.createDefaultOutputFile(BinaryMode, getCurrentFileOrBufferName());
if (!OS) return;
// If we're preprocessing a module map, start by dumping the contents of the
@@ -754,8 +821,6 @@ void PrintPreprocessedAction::ExecuteAction() {
OS->write_escaped(Input.getFile());
(*OS) << "\"\n";
}
- // FIXME: Include additional information here so that we don't need the
- // original source files to exist on disk.
getCurrentModule()->print(*OS);
(*OS) << "#pragma clang module contents\n";
}
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index 8a70404629d3..ac3bb713ddcc 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -260,6 +260,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
@@ -412,6 +413,7 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
switch (os) {
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
llvm_unreachable("Include management is handled in the driver.");
break;
@@ -460,6 +462,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
break; // Everything else continues to use this routine's logic.
case llvm::Triple::Linux:
+ case llvm::Triple::Hurd:
case llvm::Triple::Solaris:
return;
@@ -473,22 +476,6 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
if (Lang.CPlusPlus && !Lang.AsmPreprocessor &&
HSOpts.UseStandardCXXIncludes && HSOpts.UseStandardSystemIncludes) {
if (HSOpts.UseLibcxx) {
- if (triple.isOSDarwin()) {
- // On Darwin, libc++ may be installed alongside the compiler in
- // include/c++/v1.
- if (!HSOpts.ResourceDir.empty()) {
- // Remove version from foo/lib/clang/version
- StringRef NoVer = llvm::sys::path::parent_path(HSOpts.ResourceDir);
- // Remove clang from foo/lib/clang
- StringRef Lib = llvm::sys::path::parent_path(NoVer);
- // Remove lib from foo/lib
- SmallString<128> P = llvm::sys::path::parent_path(Lib);
-
- // Get foo/include/c++/v1
- llvm::sys::path::append(P, "include", "c++", "v1");
- AddUnmappedPath(P, CXXSystem, false);
- }
- }
AddPath("/usr/include/c++/v1", CXXSystem, false);
} else {
AddDefaultCPlusPlusIncludePaths(Lang, triple, HSOpts);
@@ -616,11 +603,11 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
for (auto &Include : IncludePath)
if (Include.first == System || Include.first == ExternCSystem ||
- (!Lang.ObjC1 && !Lang.CPlusPlus && Include.first == CSystem) ||
- (/*FIXME !Lang.ObjC1 && */ Lang.CPlusPlus &&
+ (!Lang.ObjC && !Lang.CPlusPlus && Include.first == CSystem) ||
+ (/*FIXME !Lang.ObjC && */ Lang.CPlusPlus &&
Include.first == CXXSystem) ||
- (Lang.ObjC1 && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
- (Lang.ObjC1 && Lang.CPlusPlus && Include.first == ObjCXXSystem))
+ (Lang.ObjC && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
+ (Lang.ObjC && Lang.CPlusPlus && Include.first == ObjCXXSystem))
SearchList.push_back(Include.second);
for (auto &Include : IncludePath)
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index e3f4f92b9d1e..66807b097d40 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -21,7 +21,6 @@
#include "clang/Frontend/FrontendOptions.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
@@ -76,23 +75,6 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder, StringRef File) {
Builder.append("##"); // ##?
}
-/// AddImplicitIncludePTH - Add an implicit \#include using the original file
-/// used to generate a PTH cache.
-static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
- StringRef ImplicitIncludePTH) {
- PTHManager *P = PP.getPTHManager();
- // Null check 'P' in the corner case where it couldn't be created.
- const char *OriginalFile = P ? P->getOriginalSourceFile() : nullptr;
-
- if (!OriginalFile) {
- PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
- << ImplicitIncludePTH;
- return;
- }
-
- AddImplicitInclude(Builder, OriginalFile);
-}
-
/// Add an implicit \#include using the original file used to generate
/// a PCH file.
static void AddImplicitIncludePCH(MacroBuilder &Builder, Preprocessor &PP,
@@ -421,7 +403,7 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_UTF_16__", "1");
Builder.defineMacro("__STDC_UTF_32__", "1");
- if (LangOpts.ObjC1)
+ if (LangOpts.ObjC)
Builder.defineMacro("__OBJC__");
// OpenCL v1.0/1.1 s6.9, v1.2/2.0 s6.10: Preprocessor Directives and Macros.
@@ -553,20 +535,21 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_guaranteed_copy_elision", "201606L");
Builder.defineMacro("__cpp_nontype_template_parameter_auto", "201606L");
}
- if (LangOpts.AlignedAllocation)
+ if (LangOpts.AlignedAllocation && !LangOpts.AlignedAllocationUnavailable)
Builder.defineMacro("__cpp_aligned_new", "201606L");
if (LangOpts.RelaxedTemplateTemplateArgs)
Builder.defineMacro("__cpp_template_template_args", "201611L");
+ // C++20 features.
+ if (LangOpts.Char8)
+ Builder.defineMacro("__cpp_char8_t", "201811L");
+ Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
+
// TS features.
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1L");
if (LangOpts.CoroutinesTS)
Builder.defineMacro("__cpp_coroutines", "201703L");
-
- // Potential future breaking changes.
- if (LangOpts.Char8)
- Builder.defineMacro("__cpp_char8_t", "201803L");
}
static void InitializePredefinedMacros(const TargetInfo &TI,
@@ -635,7 +618,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (!LangOpts.MSVCCompat && LangOpts.CPlusPlus11)
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
- if (LangOpts.ObjC1) {
+ if (LangOpts.ObjC) {
if (LangOpts.ObjCRuntime.isNonFragile()) {
Builder.defineMacro("__OBJC2__");
@@ -699,7 +682,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (!LangOpts.NoConstantCFStrings)
Builder.defineMacro("__CONSTANT_CFSTRINGS__");
- if (LangOpts.ObjC2)
+ if (LangOpts.ObjC)
Builder.defineMacro("OBJC_NEW_PROPERTIES");
if (LangOpts.PascalStrings)
@@ -1016,7 +999,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
Builder.defineMacro("__autoreleasing", "");
Builder.defineMacro("__unsafe_unretained", "");
- } else if (LangOpts.ObjC1) {
+ } else if (LangOpts.ObjC) {
Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
Builder.defineMacro("__autoreleasing",
@@ -1130,7 +1113,7 @@ void clang::InitializePreprocessor(
// Install definitions to make Objective-C++ ARC work well with various
// C++ Standard Library implementations.
- if (LangOpts.ObjC1 && LangOpts.CPlusPlus &&
+ if (LangOpts.ObjC && LangOpts.CPlusPlus &&
(LangOpts.ObjCAutoRefCount || LangOpts.ObjCWeak)) {
switch (InitOpts.ObjCXXARCStandardLibrary) {
case ARCXX_nolib:
@@ -1177,8 +1160,6 @@ void clang::InitializePreprocessor(
if (!InitOpts.ImplicitPCHInclude.empty())
AddImplicitIncludePCH(Builder, PP, PCHContainerRdr,
InitOpts.ImplicitPCHInclude);
- if (!InitOpts.ImplicitPTHInclude.empty())
- AddImplicitIncludePTH(Builder, PP, InitOpts.ImplicitPTHInclude);
// Process -include directives.
for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index 25cad8be6d00..fa8efcc3b53c 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -156,10 +156,6 @@ void ModuleDependencyCollector::writeFileMap() {
// allows crash reproducer scripts to work across machines.
VFSWriter.setOverlayDir(VFSDir);
- // Do not ignore non existent contents otherwise we might skip something
- // that should have been collected here.
- VFSWriter.setIgnoreNonExistentContents(false);
-
// Explicitly set case sensitivity for the YAML writer. For that, find out
// the sensitivity at the path where the headers all collected to.
VFSWriter.setCaseSensitivity(isCaseSensitivePath(VFSDir));
diff --git a/lib/Frontend/PrecompiledPreamble.cpp b/lib/Frontend/PrecompiledPreamble.cpp
index 30ae2db26d86..1930af187e7a 100644
--- a/lib/Frontend/PrecompiledPreamble.cpp
+++ b/lib/Frontend/PrecompiledPreamble.cpp
@@ -14,7 +14,6 @@
#include "clang/Frontend/PrecompiledPreamble.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -30,6 +29,7 @@
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <limits>
#include <utility>
@@ -48,17 +48,17 @@ StringRef getInMemoryPreamblePath() {
#endif
}
-IntrusiveRefCntPtr<vfs::FileSystem>
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
createVFSOverlayForPreamblePCH(StringRef PCHFilename,
std::unique_ptr<llvm::MemoryBuffer> PCHBuffer,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
// We want only the PCH file from the real filesystem to be available,
// so we create an in-memory VFS with just that and overlay it on top.
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> PCHFS(
- new vfs::InMemoryFileSystem());
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> PCHFS(
+ new llvm::vfs::InMemoryFileSystem());
PCHFS->addFile(PCHFilename, 0, std::move(PCHBuffer));
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> Overlay(
- new vfs::OverlayFileSystem(VFS));
+ IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> Overlay(
+ new llvm::vfs::OverlayFileSystem(VFS));
Overlay->pushOverlay(PCHFS);
return Overlay;
}
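
With the VFS headers now living in llvm/Support, the overlay construction above uses the llvm::vfs namespace. A minimal sketch of the same pattern, assuming an LLVM tree at this revision; the helper name is illustrative only.

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <memory>

// Sketch only: overlay a single in-memory file on top of a base filesystem,
// mirroring createVFSOverlayForPreamblePCH above.
static llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
overlaySingleFile(llvm::StringRef Path,
                  std::unique_ptr<llvm::MemoryBuffer> Buffer,
                  llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> Base) {
  llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemFS(
      new llvm::vfs::InMemoryFileSystem());
  InMemFS->addFile(Path, /*ModificationTime=*/0, std::move(Buffer));
  llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> Overlay(
      new llvm::vfs::OverlayFileSystem(std::move(Base)));
  Overlay->pushOverlay(InMemFS);
  return Overlay;
}
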
@@ -232,14 +232,12 @@ PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
- DiagnosticsEngine &Diagnostics, IntrusiveRefCntPtr<vfs::FileSystem> VFS,
+ DiagnosticsEngine &Diagnostics,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps, bool StoreInMemory,
PreambleCallbacks &Callbacks) {
assert(VFS && "VFS is null");
- if (!Bounds.Size)
- return BuildPreambleError::PreambleIsEmpty;
-
auto PreambleInvocation = std::make_shared<CompilerInvocation>(Invocation);
FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
PreprocessorOptions &PreprocessorOpts =
@@ -413,7 +411,7 @@ std::size_t PrecompiledPreamble::getSize() const {
bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer,
PreambleBounds Bounds,
- vfs::FileSystem *VFS) const {
+ llvm::vfs::FileSystem *VFS) const {
assert(
Bounds.Size <= MainFileBuffer->getBufferSize() &&
@@ -423,17 +421,14 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
PreprocessorOptions &PreprocessorOpts =
PreambleInvocation->getPreprocessorOpts();
- if (!Bounds.Size)
- return false;
-
// We've previously computed a preamble. Check whether we have the same
// preamble now that we did before, and that there's enough space in
// the main-file buffer within the precompiled preamble to fit the
// new main file.
if (PreambleBytes.size() != Bounds.Size ||
PreambleEndsAtStartOfLine != Bounds.PreambleEndsAtStartOfLine ||
- memcmp(PreambleBytes.data(), MainFileBuffer->getBufferStart(),
- Bounds.Size) != 0)
+ !std::equal(PreambleBytes.begin(), PreambleBytes.end(),
+ MainFileBuffer->getBuffer().begin()))
return false;
// The preamble has not changed. We may be able to re-use the precompiled
// preamble.
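
The memcmp call becomes std::equal, which compares PreambleBytes against the first Bounds.Size bytes of the main buffer; the assert above guarantees the buffer is long enough for the second range. A standalone illustration of the same prefix check, with illustrative names.

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<char> PreambleBytes = {'#', 'i', 'n', 'c', 'l', 'u', 'd', 'e'};
  std::string MainFileBuffer = "#include <vector>\nint main() {}\n";

  // std::equal only reads PreambleBytes.size() characters from the second
  // range, so the caller must ensure the buffer is at least that long.
  assert(PreambleBytes.size() <= MainFileBuffer.size());
  bool SamePrefix = std::equal(PreambleBytes.begin(), PreambleBytes.end(),
                               MainFileBuffer.begin());
  return SamePrefix ? 0 : 1;
}
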
@@ -443,7 +438,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// remapping or unsaved_files.
std::map<llvm::sys::fs::UniqueID, PreambleFileHash> OverriddenFiles;
for (const auto &R : PreprocessorOpts.RemappedFiles) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(R.second), Status)) {
// If we can't stat the file we're remapping to, assume that something
// horrible happened.
@@ -455,7 +450,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
}
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(RB.first), Status))
return false;
@@ -465,7 +460,7 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
// Check whether anything has changed.
for (const auto &F : FilesInPreamble) {
- vfs::Status Status;
+ llvm::vfs::Status Status;
if (!moveOnNoError(VFS->status(F.first()), Status)) {
// If we can't stat the file, assume that something horrible happened.
return false;
@@ -491,14 +486,14 @@ bool PrecompiledPreamble::CanReuse(const CompilerInvocation &Invocation,
}
void PrecompiledPreamble::AddImplicitPreamble(
- CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
PreambleBounds Bounds(PreambleBytes.size(), PreambleEndsAtStartOfLine);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
}
void PrecompiledPreamble::OverridePreamble(
- CompilerInvocation &CI, IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), MainFileBuffer, 0);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
@@ -686,7 +681,7 @@ PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(
void PrecompiledPreamble::configurePreamble(
PreambleBounds Bounds, CompilerInvocation &CI,
- IntrusiveRefCntPtr<vfs::FileSystem> &VFS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
assert(VFS);
@@ -707,13 +702,14 @@ void PrecompiledPreamble::configurePreamble(
void PrecompiledPreamble::setupPreambleStorage(
const PCHStorage &Storage, PreprocessorOptions &PreprocessorOpts,
- IntrusiveRefCntPtr<vfs::FileSystem> &VFS) {
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
const TempPCHFile &PCHFile = Storage.asFile();
PreprocessorOpts.ImplicitPCHInclude = PCHFile.getFilePath();
// Make sure we can access the PCH file even if we're using a VFS
- IntrusiveRefCntPtr<vfs::FileSystem> RealFS = vfs::getRealFileSystem();
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
+ llvm::vfs::getRealFileSystem();
auto PCHPath = PCHFile.getFilePath();
if (VFS == RealFS || VFS->exists(PCHPath))
return;
@@ -748,8 +744,10 @@ std::unique_ptr<PPCallbacks> PreambleCallbacks::createPPCallbacks() {
return nullptr;
}
+static llvm::ManagedStatic<BuildPreambleErrorCategory> BuildPreambleErrCategory;
+
std::error_code clang::make_error_code(BuildPreambleError Error) {
- return std::error_code(static_cast<int>(Error), BuildPreambleErrorCategory());
+ return std::error_code(static_cast<int>(Error), *BuildPreambleErrCategory);
}
const char *BuildPreambleErrorCategory::name() const noexcept {
@@ -758,8 +756,6 @@ const char *BuildPreambleErrorCategory::name() const noexcept {
std::string BuildPreambleErrorCategory::message(int condition) const {
switch (static_cast<BuildPreambleError>(condition)) {
- case BuildPreambleError::PreambleIsEmpty:
- return "Preamble is empty";
case BuildPreambleError::CouldntCreateTempFile:
return "Could not create temporary file for PCH";
case BuildPreambleError::CouldntCreateTargetInfo:
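
Routing the error category through llvm::ManagedStatic gives a single lazily-constructed instance that every std::error_code built here can reference and compare against, instead of a fresh temporary per call. A standalone sketch of the pattern, with DemoErrorCategory and makeDemoError as made-up stand-ins for the real names.

#include "llvm/Support/ManagedStatic.h"
#include <string>
#include <system_error>

namespace {
// Sketch only: illustrative names, not part of the patch.
class DemoErrorCategory : public std::error_category {
  const char *name() const noexcept override { return "demo"; }
  std::string message(int Condition) const override {
    return "demo error " + std::to_string(Condition);
  }
};
} // namespace

static llvm::ManagedStatic<DemoErrorCategory> DemoCategory;

std::error_code makeDemoError(int Code) {
  // std::error_code keeps a reference to its category, so all codes built
  // here share the one lazily-constructed ManagedStatic instance.
  return std::error_code(Code, *DemoCategory);
}
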
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 69cd072ea57f..3b835985a54c 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -750,6 +750,11 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
continue;
+ } else if (Tok.isAnnotation()) {
+ // Ignore annotation tokens created by pragmas - the pragmas themselves
+ // will be reproduced in the preprocessed output.
+ PP.Lex(Tok);
+ continue;
} else if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
OS << II->getName();
} else if (Tok.isLiteral() && !Tok.needsCleaning() &&
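
The new branch skips any remaining annotation token rather than only the module annotations handled earlier; annotation tokens carry opaque pointers, not spellable text, so printing them would produce garbage. A minimal sketch of the same guard in a generic token loop, assuming a Clang tree at this revision; printSpellableTokens is an illustrative helper.

#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: print the spelling of every non-annotation token.
static void printSpellableTokens(clang::Preprocessor &PP) {
  clang::Token Tok;
  PP.Lex(Tok);
  while (Tok.isNot(clang::tok::eof)) {
    if (Tok.isAnnotation()) {
      // Pragma- and module-generated annotations have no spelling; skip them.
      PP.Lex(Tok);
      continue;
    }
    llvm::outs() << PP.getSpelling(Tok) << ' ';
    PP.Lex(Tok);
  }
}
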
diff --git a/lib/Frontend/Rewrite/FrontendActions.cpp b/lib/Frontend/Rewrite/FrontendActions.cpp
index fa17b3e7cb3f..bcf6d215c998 100644
--- a/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -181,7 +181,7 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
void RewriteMacrosAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
std::unique_ptr<raw_ostream> OS =
- CI.createDefaultOutputFile(true, getCurrentFile());
+ CI.createDefaultOutputFile(true, getCurrentFileOrBufferName());
if (!OS) return;
RewriteMacrosInInput(CI.getPreprocessor(), OS.get());
@@ -190,7 +190,7 @@ void RewriteMacrosAction::ExecuteAction() {
void RewriteTestAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
std::unique_ptr<raw_ostream> OS =
- CI.createDefaultOutputFile(false, getCurrentFile());
+ CI.createDefaultOutputFile(false, getCurrentFileOrBufferName());
if (!OS) return;
DoRewriteTest(CI.getPreprocessor(), OS.get());
@@ -265,7 +265,8 @@ public:
bool RewriteIncludesAction::BeginSourceFileAction(CompilerInstance &CI) {
if (!OutputStream) {
- OutputStream = CI.createDefaultOutputFile(true, getCurrentFile());
+ OutputStream =
+ CI.createDefaultOutputFile(true, getCurrentFileOrBufferName());
if (!OutputStream)
return false;
}
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 1631eccd7013..2e7baa3d9581 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -586,6 +586,7 @@ void InclusionRewriter::Process(FileID FileId,
LocalEOL, Line, /*EnsureNewline=*/ true);
WriteLineInfo(FileName, Line, FileType);
RawLex.SetKeepWhitespaceMode(false);
+ break;
}
default:
break;
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 36382e1438d5..10ca9a785699 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -265,8 +265,8 @@ namespace {
// Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ Diags.Report(Context->getFullLoc(Old->getBeginLoc()), RewriteFailedDiag)
+ << Old->getSourceRange();
return;
}
// Get the new text.
@@ -282,8 +282,8 @@ namespace {
}
if (SilenceRewriteMacroWarning)
return;
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ Diags.Report(Context->getFullLoc(Old->getBeginLoc()), RewriteFailedDiag)
+ << Old->getSourceRange();
}
void InsertText(SourceLocation Loc, StringRef Str,
@@ -541,7 +541,7 @@ namespace {
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
- if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ if (!LangOpts.ObjC)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
@@ -748,11 +748,11 @@ void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
if (!IFace->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
- SourceLocation StartLoc = IFace->getLocStart();
+ SourceLocation StartLoc = IFace->getBeginLoc();
do {
if (isa<ObjCInterfaceDecl>(*DI) &&
!cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
- StartLoc == (*DI)->getLocStart())
+ StartLoc == (*DI)->getBeginLoc())
DG.push_back(*DI);
else
break;
@@ -773,11 +773,11 @@ void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
if (!Proto->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
- SourceLocation StartLoc = Proto->getLocStart();
+ SourceLocation StartLoc = Proto->getBeginLoc();
do {
if (isa<ObjCProtocolDecl>(*DI) &&
!cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
- StartLoc == (*DI)->getLocStart())
+ StartLoc == (*DI)->getBeginLoc())
DG.push_back(*DI);
else
break;
@@ -923,17 +923,16 @@ void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
static bool objcSetPropertyDefined = false;
SourceLocation startGetterSetterLoc;
- if (PID->getLocStart().isValid()) {
- SourceLocation startLoc = PID->getLocStart();
+ if (PID->getBeginLoc().isValid()) {
+ SourceLocation startLoc = PID->getBeginLoc();
InsertText(startLoc, "// ");
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synthesize location");
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "@synthesize: can't find ';'");
startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
- }
- else
- startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd();
+ } else
+ startGetterSetterLoc = IMD ? IMD->getEndLoc() : CID->getEndLoc();
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
return; // FIXME: is this correct?
@@ -1061,7 +1060,7 @@ static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString) {
- SourceLocation startLoc = ClassDecl->getLocStart();
+ SourceLocation startLoc = ClassDecl->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiPtr = strchr(startBuf, ';');
// Replace the @class with typedefs corresponding to the classes.
@@ -1109,8 +1108,8 @@ void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
// nothing to rewrite.
if (Method->isImplicit())
return;
- SourceLocation LocStart = Method->getLocStart();
- SourceLocation LocEnd = Method->getLocEnd();
+ SourceLocation LocStart = Method->getBeginLoc();
+ SourceLocation LocEnd = Method->getEndLoc();
if (SM->getExpansionLineNumber(LocEnd) >
SM->getExpansionLineNumber(LocStart)) {
@@ -1129,7 +1128,7 @@ void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) {
}
void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
- SourceLocation LocStart = CatDecl->getLocStart();
+ SourceLocation LocStart = CatDecl->getBeginLoc();
// FIXME: handle category headers that are declared across multiple lines.
if (CatDecl->getIvarRBraceLoc().isValid()) {
@@ -1154,7 +1153,7 @@ void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
}
void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
- SourceLocation LocStart = PDecl->getLocStart();
+ SourceLocation LocStart = PDecl->getBeginLoc();
assert(PDecl->isThisDeclarationADefinition());
// FIXME: handle protocol headers that are declared across multiple lines.
@@ -1189,7 +1188,7 @@ void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
}
void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
- SourceLocation LocStart = (*D.begin())->getLocStart();
+ SourceLocation LocStart = (*D.begin())->getBeginLoc();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
// FIXME: handle forward protocol that are declared across multiple lines.
@@ -1198,7 +1197,7 @@ void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
void
RewriteModernObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
- SourceLocation LocStart = DG[0]->getLocStart();
+ SourceLocation LocStart = DG[0]->getBeginLoc();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
// FIXME: handle forward protocol that are declared across multiple lines.
@@ -1338,21 +1337,21 @@ void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
if (IMD) {
if (IMD->getIvarRBraceLoc().isValid()) {
- ReplaceText(IMD->getLocStart(), 1, "/** ");
+ ReplaceText(IMD->getBeginLoc(), 1, "/** ");
ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ ");
}
else {
- InsertText(IMD->getLocStart(), "// ");
+ InsertText(IMD->getBeginLoc(), "// ");
}
}
else
- InsertText(CID->getLocStart(), "// ");
+ InsertText(CID->getBeginLoc(), "// ");
for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
- SourceLocation LocStart = OMD->getLocStart();
- SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+ SourceLocation LocStart = OMD->getBeginLoc();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getBeginLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
@@ -1362,8 +1361,8 @@ void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
- SourceLocation LocStart = OMD->getLocStart();
- SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+ SourceLocation LocStart = OMD->getBeginLoc();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getBeginLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
@@ -1372,7 +1371,7 @@ void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
RewritePropertyImplDecl(I, IMD, CID);
- InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+ InsertText(IMD ? IMD->getEndLoc() : CID->getEndLoc(), "// ");
}
void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
@@ -1608,7 +1607,7 @@ Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
// replace break with goto __break_label
std::string buf;
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
buf = "goto __break_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("break"), buf);
@@ -1638,7 +1637,7 @@ Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) {
// replace continue with goto __continue_label
std::string buf;
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
buf = "goto __continue_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("continue"), buf);
@@ -1686,7 +1685,7 @@ Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
assert(!ObjCBcLabelNo.empty() &&
"ObjCForCollectionStmt - Label No stack empty");
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
StringRef elementName;
std::string elementTypeAsString;
@@ -1860,7 +1859,7 @@ static void Write_RethrowObject(std::string &buf) {
///
Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synchronized location");
@@ -1880,20 +1879,20 @@ Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S)
buf += "\n\tid sync_exit;";
buf += "\n\t} _sync_exit(_sync_obj);\n";
- // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // We can't use S->getSynchExpr()->getEndLoc() to find the end location, since
// the sync expression is typically a message expression that's already
// been rewritten! (which implies the SourceLocation's are invalid).
- SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart();
+ SourceLocation RParenExprLoc = S->getSynchBody()->getBeginLoc();
const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc);
while (*RParenExprLocBuf != ')') RParenExprLocBuf--;
RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf);
- SourceLocation LBranceLoc = S->getSynchBody()->getLocStart();
+ SourceLocation LBranceLoc = S->getSynchBody()->getBeginLoc();
const char *LBraceLocBuf = SM->getCharacterData(LBranceLoc);
assert (*LBraceLocBuf == '{');
ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf);
- SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd();
+ SourceLocation startRBraceLoc = S->getSynchBody()->getEndLoc();
assert((*SM->getCharacterData(startRBraceLoc) == '}') &&
"bogus @synchronized block");
@@ -1915,7 +1914,7 @@ void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
WarnAboutReturnGotoStmts(SubStmt);
if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
- Diags.Report(Context->getFullLoc(S->getLocStart()),
+ Diags.Report(Context->getFullLoc(S->getBeginLoc()),
TryFinallyContainsReturnDiag);
}
}
@@ -1923,7 +1922,7 @@ void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
SourceLocation startLoc = S->getAtLoc();
ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */");
- ReplaceText(S->getSubStmt()->getLocStart(), 1,
+ ReplaceText(S->getSubStmt()->getBeginLoc(), 1,
"{ __AtAutoreleasePool __autoreleasepool; ");
return nullptr;
@@ -1944,7 +1943,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
}
}
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @try location");
@@ -1958,7 +1957,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
VarDecl *catchDecl = Catch->getCatchParamDecl();
- startLoc = Catch->getLocStart();
+ startLoc = Catch->getBeginLoc();
bool AtRemoved = false;
if (catchDecl) {
QualType t = catchDecl->getType();
@@ -1967,7 +1966,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
if (IDecl) {
std::string Result;
- ConvertSourceLocationToLineDirective(Catch->getLocStart(), Result);
+ ConvertSourceLocationToLineDirective(Catch->getBeginLoc(), Result);
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @catch location");
@@ -1988,7 +1987,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
Result += "_"; Result += catchDecl->getNameAsString();
Result += "; ";
- SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart();
+ SourceLocation lBraceLoc = Catch->getCatchBody()->getBeginLoc();
ReplaceText(lBraceLoc, 1, Result);
AtRemoved = true;
}
@@ -2001,7 +2000,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
}
if (finalStmt) {
buf.clear();
- SourceLocation FinallyLoc = finalStmt->getLocStart();
+ SourceLocation FinallyLoc = finalStmt->getBeginLoc();
if (noCatch) {
ConvertSourceLocationToLineDirective(FinallyLoc, buf);
@@ -2013,15 +2012,15 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
buf += "catch (id e) {_rethrow = e;}\n";
}
- SourceLocation startFinalLoc = finalStmt->getLocStart();
+ SourceLocation startFinalLoc = finalStmt->getBeginLoc();
ReplaceText(startFinalLoc, 8, buf);
Stmt *body = finalStmt->getFinallyBody();
- SourceLocation startFinalBodyLoc = body->getLocStart();
+ SourceLocation startFinalBodyLoc = body->getBeginLoc();
buf.clear();
Write_RethrowObject(buf);
ReplaceText(startFinalBodyLoc, 1, buf);
- SourceLocation endFinalBodyLoc = body->getLocEnd();
+ SourceLocation endFinalBodyLoc = body->getEndLoc();
ReplaceText(endFinalBodyLoc, 1, "}\n}");
// Now check for any return/continue/goto statements within the @try.

WarnAboutReturnGotoStmts(S->getTryBody());
@@ -2035,7 +2034,7 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// been rewritten! (which implies the SourceLocation's are invalid).
Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @throw location");
@@ -2052,7 +2051,7 @@ Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
assert((*wBuf == 'w') && "@throw: can't find 'w'");
ReplaceText(startLoc, wBuf-startBuf+1, buf);
- SourceLocation endLoc = S->getLocEnd();
+ SourceLocation endLoc = S->getEndLoc();
const char *endBuf = SM->getCharacterData(endLoc);
const char *semiBuf = strchr(endBuf, ';');
assert((*semiBuf == ';') && "@throw: can't find ';'");
@@ -2097,8 +2096,8 @@ RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE =
- new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(*Context, FD, false, msgSendType,
+ VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
QualType pToFunc = Context->getPointerType(msgSendType);
@@ -2108,9 +2107,8 @@ RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
- FT->getCallResultType(*Context),
- VK_RValue, EndLoc);
+ CallExpr *Exp = CallExpr::Create(
+ *Context, ICE, Args, FT->getCallResultType(*Context), VK_RValue, EndLoc);
return Exp;
}
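
Two AST-construction changes recur through the rest of this file: DeclRefExpr's constructor now takes the ASTContext as its first argument, and CallExpr nodes are built with CallExpr::Create, which allocates the node's trailing argument storage, instead of placement new. A condensed sketch of the new pattern, assuming a Clang tree at this revision; buildCall is an illustrative helper.

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/ArrayRef.h"

// Sketch only: reference a function and call it, using the updated APIs.
static clang::CallExpr *buildCall(clang::ASTContext &Ctx,
                                  clang::FunctionDecl *FD,
                                  llvm::ArrayRef<clang::Expr *> Args) {
  clang::QualType FnTy = FD->getType();
  clang::DeclRefExpr *Fn = new (Ctx) clang::DeclRefExpr(
      Ctx, FD, /*RefersToEnclosingVariableOrCapture=*/false, FnTy,
      clang::VK_LValue, clang::SourceLocation());
  const auto *FT = FnTy->getAs<clang::FunctionType>();
  return clang::CallExpr::Create(Ctx, Fn, Args,
                                 FT->getCallResultType(Ctx),
                                 clang::VK_RValue, clang::SourceLocation());
}
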
@@ -2170,8 +2168,8 @@ void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
Loc = ECE->getLParenLoc();
EndLoc = ECE->getRParenLoc();
} else {
- Loc = E->getLocStart();
- EndLoc = E->getLocEnd();
+ Loc = E->getBeginLoc();
+ EndLoc = E->getEndLoc();
}
// This will defend against trying to rewrite synthesized expressions.
if (Loc.isInvalid() || EndLoc.isInvalid())
@@ -2296,13 +2294,13 @@ void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) {
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
- startLoc = E->getLocStart();
+ startLoc = E->getBeginLoc();
startLoc = SM->getExpansionLoc(startLoc);
const char *endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
else {
- SourceLocation X = ND->getLocEnd();
+ SourceLocation X = ND->getEndLoc();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
@@ -2585,12 +2583,11 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(S),
strType, nullptr, SC_Static);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
- SourceLocation());
- Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
+ Expr *Unop = new (Context)
+ UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2625,8 +2622,8 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
- SourceLocation StartLoc = Exp->getLocStart();
- SourceLocation EndLoc = Exp->getLocEnd();
+ SourceLocation StartLoc = Exp->getBeginLoc();
+ SourceLocation EndLoc = Exp->getEndLoc();
// Synthesize a call to objc_msgSend().
SmallVector<Expr*, 4> MsgExprs;
@@ -2674,12 +2671,11 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, MsgSendFlavor, false, msgSendType, VK_LValue, SourceLocation());
- CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, DRE);
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(
+ Context, Context->getPointerType(Context->VoidTy), CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
@@ -2692,8 +2688,8 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context)
- CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2709,8 +2705,8 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
- SourceLocation StartLoc = Exp->getLocStart();
- SourceLocation EndLoc = Exp->getLocEnd();
+ SourceLocation StartLoc = Exp->getBeginLoc();
+ SourceLocation EndLoc = Exp->getEndLoc();
// Build the expression: __NSContainer_literal(int, ...).arr
QualType IntQT = Context->IntTy;
@@ -2718,9 +2714,8 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
getSimpleFunctionType(Context->VoidTy, IntQT, true);
std::string NSArrayFName("__NSContainer_literal");
FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
- DeclRefExpr *NSArrayDRE =
- new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue,
- SourceLocation());
+ DeclRefExpr *NSArrayDRE = new (Context) DeclRefExpr(
+ *Context, NSArrayFD, false, NSArrayFType, VK_RValue, SourceLocation());
SmallVector<Expr*, 16> InitExprs;
unsigned NumElements = Exp->getNumElements();
@@ -2733,8 +2728,8 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
for (unsigned i = 0; i < NumElements; i++)
InitExprs.push_back(Exp->getElement(i));
Expr *NSArrayCallExpr =
- new (Context) CallExpr(*Context, NSArrayDRE, InitExprs,
- NSArrayFType, VK_LValue, SourceLocation());
+ CallExpr::Create(*Context, NSArrayDRE, InitExprs, NSArrayFType, VK_LValue,
+ SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -2797,12 +2792,11 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, MsgSendFlavor, false, msgSendType, VK_LValue, SourceLocation());
- CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, DRE);
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(
+ Context, Context->getPointerType(Context->VoidTy), CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
@@ -2815,8 +2809,8 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context)
- CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2832,8 +2826,8 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
SynthGetClassFunctionDecl();
FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
- SourceLocation StartLoc = Exp->getLocStart();
- SourceLocation EndLoc = Exp->getLocEnd();
+ SourceLocation StartLoc = Exp->getBeginLoc();
+ SourceLocation EndLoc = Exp->getEndLoc();
// Build the expression: __NSContainer_literal(int, ...).arr
QualType IntQT = Context->IntTy;
@@ -2841,9 +2835,8 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
getSimpleFunctionType(Context->VoidTy, IntQT, true);
std::string NSDictFName("__NSContainer_literal");
FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
- DeclRefExpr *NSDictDRE =
- new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue,
- SourceLocation());
+ DeclRefExpr *NSDictDRE = new (Context) DeclRefExpr(
+ *Context, NSDictFD, false, NSDictFType, VK_RValue, SourceLocation());
SmallVector<Expr*, 16> KeyExprs;
SmallVector<Expr*, 16> ValueExprs;
@@ -2864,8 +2857,8 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
// (const id [])objects
Expr *NSValueCallExpr =
- new (Context) CallExpr(*Context, NSDictDRE, ValueExprs,
- NSDictFType, VK_LValue, SourceLocation());
+ CallExpr::Create(*Context, NSDictDRE, ValueExprs, NSDictFType, VK_LValue,
+ SourceLocation());
FieldDecl *ARRFD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -2883,9 +2876,8 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
CK_BitCast,
DictLiteralValueME);
// (const id <NSCopying> [])keys
- Expr *NSKeyCallExpr =
- new (Context) CallExpr(*Context, NSDictDRE, KeyExprs,
- NSDictFType, VK_LValue, SourceLocation());
+ Expr *NSKeyCallExpr = CallExpr::Create(
+ *Context, NSDictDRE, KeyExprs, NSDictFType, VK_LValue, SourceLocation());
MemberExpr *DictLiteralKeyME = new (Context)
MemberExpr(NSKeyCallExpr, false, SourceLocation(), ARRFD,
@@ -2951,12 +2943,11 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, MsgSendFlavor, false, msgSendType, VK_LValue, SourceLocation());
- CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
- Context->getPointerType(Context->VoidTy),
- CK_BitCast, DRE);
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(
+ Context, Context->getPointerType(Context->VoidTy), CK_BitCast, DRE);
// Now do the "normal" pointer to function cast.
QualType castType =
@@ -2969,8 +2960,8 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context)
- CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, EndLoc);
ReplaceStmt(Exp, CE);
return CE;
}
@@ -3070,7 +3061,7 @@ void RewriteModernObjC::RewriteLineDirective(const Decl *D) {
LineString += "\"";
else LineString += "\"\n";
- Location = D->getLocStart();
+ Location = D->getBeginLoc();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isExternC() && !FD->isMain()) {
const DeclContext *DC = FD->getDeclContext();
@@ -3100,10 +3091,9 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method) {
// Now do the "normal" pointer to function cast.
- QualType castType = getSimpleFunctionType(returnType, ArgTypes,
- Method ? Method->isVariadic()
- : false);
- castType = Context->getPointerType(castType);
+ QualType FuncType = getSimpleFunctionType(
+ returnType, ArgTypes, Method ? Method->isVariadic() : false);
+ QualType castType = Context->getPointerType(FuncType);
// build type for containing the objc_msgSend_stret object.
static unsigned stretCount=0;
@@ -3169,7 +3159,7 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
else {
assert(CurMethodDef && "SynthMsgSendStretCallExpr - CurMethodDef is null");
- FunLocStart = CurMethodDef->getLocStart();
+ FunLocStart = CurMethodDef->getBeginLoc();
}
InsertText(FunLocStart, str);
@@ -3177,13 +3167,13 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
// AST for __Stretn(receiver, args).s;
IdentifierInfo *ID = &Context->Idents.get(name);
- FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
- SourceLocation(), ID, castType,
- nullptr, SC_Extern, false, false);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue,
- SourceLocation());
- CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs,
- castType, VK_LValue, SourceLocation());
+ FunctionDecl *FD =
+ FunctionDecl::Create(*Context, TUDecl, SourceLocation(), SourceLocation(),
+ ID, FuncType, nullptr, SC_Extern, false, false);
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, FD, false, castType, VK_RValue, SourceLocation());
+ CallExpr *STCE = CallExpr::Create(*Context, DRE, MsgExprs, castType,
+ VK_LValue, SourceLocation());
FieldDecl *FieldD = FieldDecl::Create(*Context, nullptr, SourceLocation(),
SourceLocation(),
@@ -3249,7 +3239,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
- new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ new (Context) DeclRefExpr(*Context,
+ CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue,
@@ -3280,12 +3271,11 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
- false, superType, VK_LValue,
- SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- superType, VK_LValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
+ VK_LValue, SourceLocation());
+ SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
+ VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -3345,7 +3335,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
- new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ new (Context) DeclRefExpr(*Context,
+ CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue, SourceLocation()))
@@ -3375,11 +3366,11 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
- false, superType, VK_LValue,
- SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- superType, VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
+ VK_LValue, SourceLocation());
+ SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
+ VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has it's own
// internal definition (__rw_objc_super) that is uses. This is why
@@ -3519,8 +3510,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, MsgSendFlavor, false, msgSendType, VK_LValue, SourceLocation());
// Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid).
// If we don't do this cast, we get the following bizarre warning/note:
@@ -3543,8 +3534,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context)
- CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3563,8 +3554,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
}
Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
- Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
- Exp->getLocEnd());
+ Stmt *ReplacingStmt =
+ SynthMessageExpr(Exp, Exp->getBeginLoc(), Exp->getEndLoc());
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
@@ -3597,10 +3588,9 @@ Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, getProtocolType(),
nullptr, SC_Extern);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
- VK_LValue, SourceLocation());
- CastExpr *castExpr =
- NoTypeInfoCStyleCastExpr(
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, VD, false, getProtocolType(), VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(
Context, Context->getPointerType(DRE->getType()), CK_BitCast, DRE);
ReplaceStmt(Exp, castExpr);
ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
@@ -3896,7 +3886,7 @@ void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
IVD; IVD = IVD->getNextIvar())
IVars.push_back(IVD);
- SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocStart = CDecl->getBeginLoc();
SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
const char *startBuf = SM->getCharacterData(LocStart);
@@ -4461,9 +4451,9 @@ static void BuildUniqueMethodName(std::string &Name,
}
void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
- //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
- //SourceLocation FunLocStart = MD->getLocStart();
- SourceLocation FunLocStart = MD->getLocStart();
+ // fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ // SourceLocation FunLocStart = MD->getBeginLoc();
+ SourceLocation FunLocStart = MD->getBeginLoc();
std::string FuncName;
BuildUniqueMethodName(FuncName, MD);
SynthesizeBlockLiterals(FunLocStart, FuncName);
@@ -4656,9 +4646,8 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
- Exp->getType(), VK_RValue,
- SourceLocation());
+ CallExpr *CE = CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(),
+ VK_RValue, SourceLocation());
return CE;
}
@@ -4778,7 +4767,7 @@ void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
std::string Str = "(";
Str += TypeString;
Str += ")";
- InsertText(IC->getSubExpr()->getLocStart(), Str);
+ InsertText(IC->getSubExpr()->getBeginLoc(), Str);
}
void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
@@ -5036,7 +5025,7 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
// Use variable's location which is good for this case.
DeclLoc = ND->getLocation();
const char *startBuf = SM->getCharacterData(DeclLoc);
- SourceLocation X = ND->getLocEnd();
+ SourceLocation X = ND->getEndLoc();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
std::string Name(ND->getNameAsString());
@@ -5069,7 +5058,7 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef);
else {
assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
- FunLocStart = CurMethodDef->getLocStart();
+ FunLocStart = CurMethodDef->getBeginLoc();
}
InsertText(FunLocStart, ByrefType);
@@ -5156,7 +5145,7 @@ void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl,
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
- startLoc = E->getLocStart();
+ startLoc = E->getBeginLoc();
startLoc = SM->getExpansionLoc(startLoc);
endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
@@ -5287,15 +5276,15 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
Tag += FuncName + "_block_impl_" + BlockNumber;
FD = SynthBlockInitFunctionDecl(Tag);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, FD, false, FType, VK_RValue, SourceLocation());
SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
- DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
- VK_LValue, SourceLocation());
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(
+ *Context, FD, false, FD->getType(), VK_LValue, SourceLocation());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
InitExprs.push_back(castExpr);
@@ -5306,15 +5295,11 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr =
- new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
- Context->VoidPtrTy,
- VK_LValue,
- SourceLocation()),
- UO_AddrOf,
- Context->getPointerType(Context->VoidPtrTy),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
+ VK_LValue, SourceLocation()),
+ UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -5326,7 +5311,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
@@ -5337,13 +5322,13 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ Arg = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
VK_LValue, SourceLocation());
Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
CK_BitCast, Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
@@ -5372,8 +5357,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
- SourceLocation());
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
bool isNestedCapturedVar = false;
if (block)
for (const auto &CI : block->captures()) {
@@ -5405,8 +5390,8 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
- NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- FType, VK_LValue, SourceLocation());
+ NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
+ SourceLocation());
if (GlobalBlockExpr) {
assert (!GlobalConstructionExp &&
@@ -5537,8 +5522,8 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
#if 0
// Before we rewrite it, put the original message expression in a comment.
- SourceLocation startLoc = MessExpr->getLocStart();
- SourceLocation endLoc = MessExpr->getLocEnd();
+ SourceLocation startLoc = MessExpr->getBeginLoc();
+ SourceLocation endLoc = MessExpr->getEndLoc();
const char *startBuf = SM->getCharacterData(startLoc);
const char *endBuf = SM->getCharacterData(endLoc);
@@ -5676,7 +5661,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
- InsertText(ICE->getSubExpr()->getLocStart(), Str);
+ InsertText(ICE->getSubExpr()->getBeginLoc(), Str);
delete S;
return Replacement;
}
@@ -7499,9 +7484,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
SourceLocation(), &Context->Idents.get(IvarOffsetName),
Context->UnsignedLongTy, nullptr,
SC_Extern);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false,
- Context->UnsignedLongTy, VK_LValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, NewVD, false, Context->UnsignedLongTy,
+ VK_LValue, SourceLocation());
BinaryOperator *addExpr =
new (Context) BinaryOperator(castExpr, DRE, BO_Add,
Context->getPointerType(Context->CharTy),
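
The bulk of this file's churn is the source-location accessor rename: getLocStart()/getLocEnd() on Decl and Stmt become getBeginLoc()/getEndLoc(). A minimal sketch using the new names, assuming a Clang tree at this revision; printStmtRange is an illustrative helper.

#include "clang/AST/Stmt.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: print the source range of a statement with the renamed API.
static void printStmtRange(const clang::Stmt *S,
                           const clang::SourceManager &SM) {
  clang::SourceLocation Begin = S->getBeginLoc(); // was S->getLocStart()
  clang::SourceLocation End = S->getEndLoc();     // was S->getLocEnd()
  llvm::outs() << Begin.printToString(SM) << " - "
               << End.printToString(SM) << "\n";
}
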
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 6229351e8f54..3e018800b909 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -216,8 +216,8 @@ namespace {
// Measure the old text.
int Size = Rewrite.getRangeSize(SrcRange);
if (Size == -1) {
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ Diags.Report(Context->getFullLoc(Old->getBeginLoc()), RewriteFailedDiag)
+ << Old->getSourceRange();
return;
}
// Get the new text.
@@ -233,8 +233,8 @@ namespace {
}
if (SilenceRewriteMacroWarning)
return;
- Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
- << Old->getSourceRange();
+ Diags.Report(Context->getFullLoc(Old->getBeginLoc()), RewriteFailedDiag)
+ << Old->getSourceRange();
}
void InsertText(SourceLocation Loc, StringRef Str,
@@ -448,7 +448,7 @@ namespace {
// FIXME: This predicate seems like it would be useful to add to ASTContext.
bool isObjCType(QualType T) {
- if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ if (!LangOpts.ObjC)
return false;
QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
@@ -680,11 +680,11 @@ void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
if (!IFace->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
- SourceLocation StartLoc = IFace->getLocStart();
+ SourceLocation StartLoc = IFace->getBeginLoc();
do {
if (isa<ObjCInterfaceDecl>(*DI) &&
!cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
- StartLoc == (*DI)->getLocStart())
+ StartLoc == (*DI)->getBeginLoc())
DG.push_back(*DI);
else
break;
@@ -699,11 +699,11 @@ void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
if (!Proto->isThisDeclarationADefinition()) {
SmallVector<Decl *, 8> DG;
- SourceLocation StartLoc = Proto->getLocStart();
+ SourceLocation StartLoc = Proto->getBeginLoc();
do {
if (isa<ObjCProtocolDecl>(*DI) &&
!cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
- StartLoc == (*DI)->getLocStart())
+ StartLoc == (*DI)->getBeginLoc())
DG.push_back(*DI);
else
break;
@@ -769,7 +769,7 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCCategoryImplDecl *CID) {
static bool objcGetPropertyDefined = false;
static bool objcSetPropertyDefined = false;
- SourceLocation startLoc = PID->getLocStart();
+ SourceLocation startLoc = PID->getBeginLoc();
InsertText(startLoc, "// ");
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synthesize location");
@@ -901,11 +901,11 @@ static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
const std::string &typedefString) {
- SourceLocation startLoc = ClassDecl->getLocStart();
- const char *startBuf = SM->getCharacterData(startLoc);
- const char *semiPtr = strchr(startBuf, ';');
- // Replace the @class with typedefs corresponding to the classes.
- ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+ SourceLocation startLoc = ClassDecl->getBeginLoc();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr - startBuf + 1, typedefString);
}
void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
@@ -945,8 +945,8 @@ void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
// nothing to rewrite.
if (Method->isImplicit())
return;
- SourceLocation LocStart = Method->getLocStart();
- SourceLocation LocEnd = Method->getLocEnd();
+ SourceLocation LocStart = Method->getBeginLoc();
+ SourceLocation LocEnd = Method->getEndLoc();
if (SM->getExpansionLineNumber(LocEnd) >
SM->getExpansionLineNumber(LocStart)) {
@@ -965,7 +965,7 @@ void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) {
}
void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
- SourceLocation LocStart = CatDecl->getLocStart();
+ SourceLocation LocStart = CatDecl->getBeginLoc();
// FIXME: handle category headers that are declared across multiple lines.
ReplaceText(LocStart, 0, "// ");
@@ -983,7 +983,7 @@ void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
}
void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
- SourceLocation LocStart = PDecl->getLocStart();
+ SourceLocation LocStart = PDecl->getBeginLoc();
assert(PDecl->isThisDeclarationADefinition());
// FIXME: handle protocol headers that are declared across multiple lines.
@@ -1018,7 +1018,7 @@ void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
}
void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
- SourceLocation LocStart = (*D.begin())->getLocStart();
+ SourceLocation LocStart = (*D.begin())->getBeginLoc();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
// FIXME: handle forward protocol that are declared across multiple lines.
@@ -1027,7 +1027,7 @@ void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
void
RewriteObjC::RewriteForwardProtocolDecl(const SmallVectorImpl<Decl *> &DG) {
- SourceLocation LocStart = DG[0]->getLocStart();
+ SourceLocation LocStart = DG[0]->getBeginLoc();
if (LocStart.isInvalid())
llvm_unreachable("Invalid SourceLocation");
// FIXME: handle forward protocol that are declared across multiple lines.
@@ -1165,13 +1165,13 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
- InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// ");
+ InsertText(IMD ? IMD->getBeginLoc() : CID->getBeginLoc(), "// ");
for (auto *OMD : IMD ? IMD->instance_methods() : CID->instance_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
- SourceLocation LocStart = OMD->getLocStart();
- SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+ SourceLocation LocStart = OMD->getBeginLoc();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getBeginLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
@@ -1181,8 +1181,8 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
for (auto *OMD : IMD ? IMD->class_methods() : CID->class_methods()) {
std::string ResultStr;
RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
- SourceLocation LocStart = OMD->getLocStart();
- SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+ SourceLocation LocStart = OMD->getBeginLoc();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getBeginLoc();
const char *startBuf = SM->getCharacterData(LocStart);
const char *endBuf = SM->getCharacterData(LocEnd);
@@ -1191,7 +1191,7 @@ void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
for (auto *I : IMD ? IMD->property_impls() : CID->property_impls())
RewritePropertyImplDecl(I, IMD, CID);
- InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+ InsertText(IMD ? IMD->getEndLoc() : CID->getEndLoc(), "// ");
}
void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
@@ -1412,7 +1412,7 @@ Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) {
// replace break with goto __break_label
std::string buf;
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
buf = "goto __break_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("break"), buf);
@@ -1429,7 +1429,7 @@ Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
// replace continue with goto __continue_label
std::string buf;
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
buf = "goto __continue_label_";
buf += utostr(ObjCBcLabelNo.back());
ReplaceText(startLoc, strlen("continue"), buf);
@@ -1477,7 +1477,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
assert(!ObjCBcLabelNo.empty() &&
"ObjCForCollectionStmt - Label No stack empty");
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
StringRef elementName;
std::string elementTypeAsString;
@@ -1641,7 +1641,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
///
Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @synchronized location");
@@ -1651,10 +1651,10 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
const char *lparenBuf = startBuf;
while (*lparenBuf != '(') lparenBuf++;
ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
- // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // We can't use S->getSynchExpr()->getEndLoc() to find the end location, since
// the sync expression is typically a message expression that's already
// been rewritten! (which implies the SourceLocations are invalid).
- SourceLocation endLoc = S->getSynchBody()->getLocStart();
+ SourceLocation endLoc = S->getSynchBody()->getBeginLoc();
const char *endBuf = SM->getCharacterData(endLoc);
while (*endBuf != ')') endBuf--;
SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf);
@@ -1667,7 +1667,7 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
buf += "objc_exception_try_enter(&_stack);\n";
buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
ReplaceText(rparenLoc, 1, buf);
- startLoc = S->getSynchBody()->getLocEnd();
+ startLoc = S->getSynchBody()->getEndLoc();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '}') && "bogus @synchronized block");
@@ -1719,7 +1719,7 @@ void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S)
WarnAboutReturnGotoStmts(SubStmt);
if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
- Diags.Report(Context->getFullLoc(S->getLocStart()),
+ Diags.Report(Context->getFullLoc(S->getBeginLoc()),
TryFinallyContainsReturnDiag);
}
}
@@ -1742,7 +1742,7 @@ void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
RewriteTryReturnStmts(SubStmt);
}
if (isa<ReturnStmt>(S)) {
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiBuf = strchr(startBuf, ';');
assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'");
@@ -1763,7 +1763,7 @@ void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
RewriteSyncReturnStmts(SubStmt, syncExitBuf);
}
if (isa<ReturnStmt>(S)) {
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
const char *semiBuf = strchr(startBuf, ';');
@@ -1782,7 +1782,7 @@ void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @try location");
@@ -1798,7 +1798,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
ReplaceText(startLoc, 4, buf);
- startLoc = S->getTryBody()->getLocEnd();
+ startLoc = S->getTryBody()->getEndLoc();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '}') && "bogus @try block");
@@ -1829,7 +1829,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
buf = "if ("; // we are generating code for the first catch clause
else
buf = "else if (";
- startLoc = Catch->getLocStart();
+ startLoc = Catch->getBeginLoc();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @catch location");
@@ -1839,7 +1839,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
if (Catch->hasEllipsis()) {
// Now rewrite the body...
lastCatchBody = Catch->getCatchBody();
- SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ SourceLocation bodyLoc = lastCatchBody->getBeginLoc();
const char *bodyBuf = SM->getCharacterData(bodyLoc);
assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' &&
"bogus @catch paren location");
@@ -1866,7 +1866,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// Now rewrite the body...
lastCatchBody = Catch->getCatchBody();
SourceLocation rParenLoc = Catch->getRParenLoc();
- SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ SourceLocation bodyLoc = lastCatchBody->getBeginLoc();
const char *bodyBuf = SM->getCharacterData(bodyLoc);
const char *rParenBuf = SM->getCharacterData(rParenLoc);
assert((*rParenBuf == ')') && "bogus @catch paren location");
@@ -1881,7 +1881,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
}
// Complete the catch list...
if (lastCatchBody) {
- SourceLocation bodyLoc = lastCatchBody->getLocEnd();
+ SourceLocation bodyLoc = lastCatchBody->getEndLoc();
assert(*SM->getCharacterData(bodyLoc) == '}' &&
"bogus @catch body location");
@@ -1897,18 +1897,18 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
InsertText(bodyLoc, buf);
// Set lastCurlyLoc
- lastCurlyLoc = lastCatchBody->getLocEnd();
+ lastCurlyLoc = lastCatchBody->getEndLoc();
}
if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) {
- startLoc = finalStmt->getLocStart();
+ startLoc = finalStmt->getBeginLoc();
startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @finally start");
ReplaceText(startLoc, 8, "/* @finally */");
Stmt *body = finalStmt->getFinallyBody();
- SourceLocation startLoc = body->getLocStart();
- SourceLocation endLoc = body->getLocEnd();
+ SourceLocation startLoc = body->getBeginLoc();
+ SourceLocation endLoc = body->getEndLoc();
assert(*SM->getCharacterData(startLoc) == '{' &&
"bogus @finally body location");
assert(*SM->getCharacterData(endLoc) == '}' &&
@@ -1920,7 +1920,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n");
// Set lastCurlyLoc
- lastCurlyLoc = body->getLocEnd();
+ lastCurlyLoc = body->getEndLoc();
// Now check for any return/continue/goto statements within the @try.
WarnAboutReturnGotoStmts(S->getTryBody());
@@ -1950,7 +1950,7 @@ Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// been rewritten! (which implies the SourceLocations are invalid).
Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
// Get the start location and compute the semi location.
- SourceLocation startLoc = S->getLocStart();
+ SourceLocation startLoc = S->getBeginLoc();
const char *startBuf = SM->getCharacterData(startLoc);
assert((*startBuf == '@') && "bogus @throw location");
@@ -2009,7 +2009,7 @@ RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
QualType msgSendType = FD->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType,
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(*Context, FD, false, msgSendType,
VK_LValue, SourceLocation());
// Now, we cast the reference to a pointer to the objc_msgSend type.
@@ -2020,9 +2020,8 @@ RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *Exp = new (Context) CallExpr(*Context, ICE, Args,
- FT->getCallResultType(*Context),
- VK_RValue, EndLoc);
+ CallExpr *Exp = CallExpr::Create(
+ *Context, ICE, Args, FT->getCallResultType(*Context), VK_RValue, EndLoc);
return Exp;
}
@@ -2082,8 +2081,8 @@ void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
Loc = ECE->getLParenLoc();
EndLoc = ECE->getRParenLoc();
} else {
- Loc = E->getLocStart();
- EndLoc = E->getLocEnd();
+ Loc = E->getBeginLoc();
+ EndLoc = E->getEndLoc();
}
// This will defend against trying to rewrite synthesized expressions.
if (Loc.isInvalid() || EndLoc.isInvalid())
@@ -2204,13 +2203,13 @@ void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) {
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
- startLoc = E->getLocStart();
+ startLoc = E->getBeginLoc();
startLoc = SM->getExpansionLoc(startLoc);
const char *endBuf = SM->getCharacterData(startLoc);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
}
else {
- SourceLocation X = ND->getLocEnd();
+ SourceLocation X = ND->getEndLoc();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
@@ -2506,12 +2505,11 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(S),
strType, nullptr, SC_Static);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
- SourceLocation());
- Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
+ Expr *Unop = new (Context)
+ UnaryOperator(DRE, UO_AddrOf, Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation(), false);
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
CK_CPointerToObjCPointerCast, Unop);
@@ -2589,9 +2587,9 @@ CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavo
SmallVectorImpl<Expr*> &MsgExprs,
ObjCMethodDecl *Method) {
// Create a reference to the objc_msgSend_stret() declaration.
- DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
- false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *STDRE =
+ new (Context) DeclRefExpr(*Context, MsgSendStretFlavor, false,
+ msgSendType, VK_LValue, SourceLocation());
// Need to cast objc_msgSend_stret to "void *" (see above comment).
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(Context->VoidTy),
@@ -2608,8 +2606,8 @@ CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavo
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *STCE = new (Context) CallExpr(
- *Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, SourceLocation());
+ CallExpr *STCE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, SourceLocation());
return STCE;
}
@@ -2664,7 +2662,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
- new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ new (Context) DeclRefExpr(*Context,
+ CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue,
@@ -2697,12 +2696,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
- false, superType, VK_LValue,
- SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- superType, VK_LValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
+ VK_LValue, SourceLocation());
+ SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
+ VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2759,7 +2757,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
InitExprs.push_back(
NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
CK_BitCast,
- new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ new (Context) DeclRefExpr(*Context,
+ CurMethodDef->getSelfDecl(),
false,
Context->getObjCIdType(),
VK_RValue, SourceLocation()))
@@ -2792,11 +2791,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
if (LangOpts.MicrosoftExt) {
SynthSuperConstructorFunctionDecl();
// Simulate a constructor call...
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperConstructorFunctionDecl,
- false, superType, VK_LValue,
- SourceLocation());
- SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- superType, VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, SuperConstructorFunctionDecl, false, superType,
+ VK_LValue, SourceLocation());
+ SuperRep = CallExpr::Create(*Context, DRE, InitExprs, superType,
+ VK_LValue, SourceLocation());
// The code for super is a little tricky to prevent collision with
// the structure definition in the header. The rewriter has its own
// internal definition (__rw_objc_super) that it uses. This is why
@@ -2936,8 +2935,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
QualType msgSendType = MsgSendFlavor->getType();
// Create a reference to the objc_msgSend() declaration.
- DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, MsgSendFlavor, false, msgSendType, VK_LValue, SourceLocation());
// Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid).
// If we don't do this cast, we get the following bizarre warning/note:
@@ -2960,8 +2959,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- CallExpr *CE = new (Context)
- CallExpr(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue, EndLoc);
+ CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
+ VK_RValue, EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3008,8 +3007,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
}
Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
- Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
- Exp->getLocEnd());
+ Stmt *ReplacingStmt =
+ SynthMessageExpr(Exp, Exp->getBeginLoc(), Exp->getEndLoc());
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
@@ -3041,8 +3040,8 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
SourceLocation(), ID, getProtocolType(),
nullptr, SC_Extern);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
- VK_LValue, SourceLocation());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(
+ *Context, VD, false, getProtocolType(), VK_LValue, SourceLocation());
Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
Context->getPointerType(DRE->getType()),
VK_RValue, OK_Ordinary, SourceLocation(), false);
@@ -3093,7 +3092,7 @@ void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
return;
ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
int NumIvars = CDecl->ivar_size();
- SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocStart = CDecl->getBeginLoc();
SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
const char *startBuf = SM->getCharacterData(LocStart);
@@ -3635,9 +3634,9 @@ static void BuildUniqueMethodName(std::string &Name,
}
void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
- //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
- //SourceLocation FunLocStart = MD->getLocStart();
- SourceLocation FunLocStart = MD->getLocStart();
+ // fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ // SourceLocation FunLocStart = MD->getBeginLoc();
+ SourceLocation FunLocStart = MD->getBeginLoc();
std::string FuncName;
BuildUniqueMethodName(FuncName, MD);
SynthesizeBlockLiterals(FunLocStart, FuncName);
@@ -3811,9 +3810,8 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
E = Exp->arg_end(); I != E; ++I) {
BlkExprs.push_back(*I);
}
- CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs,
- Exp->getType(), VK_RValue,
- SourceLocation());
+ CallExpr *CE = CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(),
+ VK_RValue, SourceLocation());
return CE;
}
@@ -4179,7 +4177,7 @@ void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
// Use variable's location which is good for this case.
DeclLoc = ND->getLocation();
const char *startBuf = SM->getCharacterData(DeclLoc);
- SourceLocation X = ND->getLocEnd();
+ SourceLocation X = ND->getEndLoc();
X = SM->getExpansionLoc(X);
const char *endBuf = SM->getCharacterData(X);
std::string Name(ND->getNameAsString());
@@ -4212,7 +4210,7 @@ void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
else {
assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
- FunLocStart = CurMethodDef->getLocStart();
+ FunLocStart = CurMethodDef->getBeginLoc();
}
InsertText(FunLocStart, ByrefType);
if (Ty.isObjCGCWeak()) {
@@ -4274,7 +4272,7 @@ void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
startLoc = ECE->getLParenLoc();
else
- startLoc = E->getLocStart();
+ startLoc = E->getBeginLoc();
startLoc = SM->getExpansionLoc(startLoc);
endBuf = SM->getCharacterData(startLoc);
ByrefType += " " + Name;
@@ -4411,17 +4409,17 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// Simulate a constructor call...
FD = SynthBlockInitFunctionDecl(Tag);
- DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
- SourceLocation());
+ DeclRefExpr *DRE = new (Context)
+ DeclRefExpr(*Context, FD, false, FType, VK_RValue, SourceLocation());
SmallVector<Expr*, 4> InitExprs;
// Initialize the block function.
FD = SynthBlockInitFunctionDecl(Func);
- DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
- VK_LValue, SourceLocation());
- CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
- CK_BitCast, Arg);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(
+ *Context, FD, false, FD->getType(), VK_LValue, SourceLocation());
+ CastExpr *castExpr =
+ NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, CK_BitCast, Arg);
InitExprs.push_back(castExpr);
// Initialize the block descriptor.
@@ -4430,15 +4428,11 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
VarDecl *NewVD = VarDecl::Create(
*Context, TUDecl, SourceLocation(), SourceLocation(),
&Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
- UnaryOperator *DescRefExpr =
- new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
- Context->VoidPtrTy,
- VK_LValue,
- SourceLocation()),
- UO_AddrOf,
- Context->getPointerType(Context->VoidPtrTy),
- VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ UnaryOperator *DescRefExpr = new (Context) UnaryOperator(
+ new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
+ VK_LValue, SourceLocation()),
+ UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
InitExprs.push_back(DescRefExpr);
// Add initializers for any closure decl refs.
@@ -4450,8 +4444,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (isObjCType((*I)->getType())) {
// FIXME: Conform to ABI ([[obj retain] autorelease]).
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
- SourceLocation());
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
@@ -4461,14 +4455,14 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
- SourceLocation());
- Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
- CK_BitCast, Arg);
+ Arg = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, CK_BitCast,
+ Arg);
} else {
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
- SourceLocation());
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
@@ -4495,8 +4489,8 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
FD = SynthBlockInitFunctionDecl((*I)->getName());
- Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
- SourceLocation());
+ Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
bool isNestedCapturedVar = false;
if (block)
for (const auto &CI : block->captures()) {
@@ -4527,11 +4521,11 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
Context->IntTy, SourceLocation());
InitExprs.push_back(FlagExp);
}
- NewRep = new (Context) CallExpr(*Context, DRE, InitExprs,
- FType, VK_LValue, SourceLocation());
- NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
- Context->getPointerType(NewRep->getType()),
- VK_RValue, OK_Ordinary, SourceLocation(), false);
+ NewRep = CallExpr::Create(*Context, DRE, InitExprs, FType, VK_LValue,
+ SourceLocation());
+ NewRep = new (Context) UnaryOperator(
+ NewRep, UO_AddrOf, Context->getPointerType(NewRep->getType()), VK_RValue,
+ OK_Ordinary, SourceLocation(), false);
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
BlockDeclRefs.clear();
@@ -4632,8 +4626,8 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
#if 0
// Before we rewrite it, put the original message expression in a comment.
- SourceLocation startLoc = MessExpr->getLocStart();
- SourceLocation endLoc = MessExpr->getLocEnd();
+ SourceLocation startLoc = MessExpr->getBeginLoc();
+ SourceLocation endLoc = MessExpr->getEndLoc();
const char *startBuf = SM->getCharacterData(startLoc);
const char *endBuf = SM->getCharacterData(endLoc);
@@ -4760,7 +4754,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
const std::string &Str = Buf.str();
printf("CAST = %s\n", &Str[0]);
- InsertText(ICE->getSubExpr()->getLocStart(), Str);
+ InsertText(ICE->getSubExpr()->getBeginLoc(), Str);
delete S;
return Replacement;
}
@@ -5873,8 +5867,8 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
CK_BitCast,
IV->getBase());
// Don't forget the parens to enforce the proper binding.
- ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
- IV->getBase()->getLocEnd(), castExpr);
+ ParenExpr *PE = new (Context) ParenExpr(
+ IV->getBase()->getBeginLoc(), IV->getBase()->getEndLoc(), castExpr);
// Cannot delete IV->getBase(), since PE points to it.
// Replace the old base with the cast. This is important when doing
// embedded rewrites. For example, [newInv->_container addObject:0].
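
The RewriteObjC.cpp hunks above are mechanical: source-range accessors move from getLocStart()/getLocEnd() to getBeginLoc()/getEndLoc(), DeclRefExpr gains an ASTContext parameter, and CallExpr is built through its Create factory instead of placement new. A condensed sketch of the new pattern, using placeholder names (Ctx, Fn, Callee, Args, Ty, Loc) rather than anything from the diff:

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"

using namespace clang;

static CallExpr *buildCall(ASTContext &Ctx, FunctionDecl *Fn, Expr *Callee,
                           llvm::ArrayRef<Expr *> Args, QualType Ty,
                           SourceLocation Loc) {
  // getLocStart()/getLocEnd() are now getBeginLoc()/getEndLoc().
  SourceLocation Begin = Fn->getBeginLoc();
  (void)Begin;
  // DeclRefExpr now takes the ASTContext as its first constructor argument.
  DeclRefExpr *DRE = new (Ctx) DeclRefExpr(
      Ctx, Fn, /*RefersToEnclosingVariableOrCapture=*/false, Ty, VK_LValue,
      SourceLocation());
  (void)DRE;
  // CallExpr allocates trailing storage for its arguments, so it is built
  // through the Create factory rather than placement new.
  return CallExpr::Create(Ctx, Callee, Args, Ty, VK_RValue, Loc);
}
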
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 747fdd241640..7015772fa168 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/FrontendTool/Utils.h"
#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Config/config.h"
@@ -23,10 +22,12 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/Utils.h"
+#include "clang/FrontendTool/Utils.h"
#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -61,8 +62,9 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
return llvm::make_unique<GenerateModuleFromModuleMapAction>();
case GenerateModuleInterface:
return llvm::make_unique<GenerateModuleInterfaceAction>();
+ case GenerateHeaderModule:
+ return llvm::make_unique<GenerateHeaderModuleAction>();
case GeneratePCH: return llvm::make_unique<GeneratePCHAction>();
- case GeneratePTH: return llvm::make_unique<GeneratePTHAction>();
case InitOnly: return llvm::make_unique<InitOnlyAction>();
case ParseSyntaxOnly: return llvm::make_unique<SyntaxOnlyAction>();
case ModuleFileInfo: return llvm::make_unique<DumpModuleInfoAction>();
@@ -88,7 +90,6 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
return nullptr;
}
- case PrintDeclContext: return llvm::make_unique<DeclContextPrintAction>();
case PrintPreamble: return llvm::make_unique<PrintPreambleAction>();
case PrintPreprocessedInput: {
if (CI.getPreprocessorOutputOpts().RewriteIncludes ||
@@ -182,7 +183,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
std::unique_ptr<OptTable> Opts = driver::createDriverOptTable();
- Opts->PrintHelp(llvm::outs(), "clang -cc1",
+ Opts->PrintHelp(llvm::outs(), "clang -cc1 [options] file...",
"LLVM 'Clang' Compiler: http://clang.llvm.org",
/*Include=*/driver::options::CC1Option,
/*Exclude=*/0, /*ShowAllAliases=*/false);
@@ -237,13 +238,23 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -analyzer-checker-help.
// This should happen AFTER plugins have been loaded!
if (Clang->getAnalyzerOpts()->ShowCheckerHelp) {
- ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
+ ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins,
+ Clang->getDiagnostics());
return true;
}
+
+ // Honor -analyzer-list-enabled-checkers.
if (Clang->getAnalyzerOpts()->ShowEnabledCheckerList) {
ento::printEnabledCheckerList(llvm::outs(),
Clang->getFrontendOpts().Plugins,
- *Clang->getAnalyzerOpts());
+ *Clang->getAnalyzerOpts(),
+ Clang->getDiagnostics());
+ }
+
+ // Honor -analyzer-config-help.
+ if (Clang->getAnalyzerOpts()->ShowConfigOptionsList) {
+ ento::printAnalyzerConfigList(llvm::outs());
+ return true;
}
#endif
@@ -256,7 +267,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
return false;
bool Success = Clang->ExecuteAction(*Act);
if (Clang->getFrontendOpts().DisableFree)
- BuryPointer(std::move(Act));
+ llvm::BuryPointer(std::move(Act));
return Success;
}
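
The DisableFree path above now spells out llvm::BuryPointer, matching the helper's new home in llvm/Support/BuryPointer.h. A minimal sketch of that pattern, with HypotheticalAction standing in for the real action type:

#include "llvm/Support/BuryPointer.h"
#include <memory>

struct HypotheticalAction {}; // placeholder; not a real Clang type

void finishAction(std::unique_ptr<HypotheticalAction> Act, bool DisableFree) {
  if (DisableFree)
    llvm::BuryPointer(std::move(Act)); // deliberately leak to skip teardown cost
  // otherwise Act is destroyed normally when it goes out of scope
}
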
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 1930d8e225c7..e444c9c8706f 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -144,7 +144,7 @@ foreach( f ${files} ${cuda_wrapper_files} )
list(APPEND out_files ${dst})
endforeach( f )
-add_custom_command(OUTPUT ${output_dir}/arm_neon.h
+add_custom_command(OUTPUT ${output_dir}/arm_neon.h
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h ${output_dir}/arm_neon.h
COMMENT "Copying clang's arm_neon.h...")
@@ -156,7 +156,9 @@ add_custom_command(OUTPUT ${output_dir}/arm_fp16.h
list(APPEND out_files ${output_dir}/arm_fp16.h)
add_custom_target(clang-headers ALL DEPENDS ${out_files})
-set_target_properties(clang-headers PROPERTIES FOLDER "Misc")
+set_target_properties(clang-headers PROPERTIES
+ FOLDER "Misc"
+ RUNTIME_OUTPUT_DIRECTORY "${output_dir}")
install(
FILES ${files} ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
index 09705a273a47..f05c0454a883 100644
--- a/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -62,10 +62,15 @@
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000 || CUDA_VERSION > 9020
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 10000
#error "Unsupported CUDA version!"
#endif
+#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
+#if CUDA_VERSION >= 10000
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#endif
+
// Make largest subset of device functions available during host
// compilation -- SM_35 for the time being.
#ifndef __CUDA_ARCH__
@@ -419,6 +424,7 @@ __device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
#pragma pop_macro("dim3")
#pragma pop_macro("uint3")
#pragma pop_macro("__USE_FAST_MATH__")
+#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#endif // __CUDA__
#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__
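
The CUDA wrapper change relies on push_macro/pop_macro to expose __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ only while the wrapper is active. A standalone sketch of that scoping pattern, using a made-up macro name:

#pragma push_macro("INTERNAL_HEADERS")
#define INTERNAL_HEADERS 1

#ifdef INTERNAL_HEADERS
int visible_only_inside_the_region() { return 1; }
#endif

#pragma pop_macro("INTERNAL_HEADERS")
// INTERNAL_HEADERS now reverts to whatever it was before the push
// (here: undefined), so the guard below is taken.
#ifndef INTERNAL_HEADERS
int visible_after_the_pop() { return 0; }
#endif
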
diff --git a/lib/Headers/adxintrin.h b/lib/Headers/adxintrin.h
index ee347284178e..d6c454db8512 100644
--- a/lib/Headers/adxintrin.h
+++ b/lib/Headers/adxintrin.h
@@ -53,7 +53,7 @@ static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
unsigned int *__p)
{
- return __builtin_ia32_addcarry_u32(__cf, __x, __y, __p);
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
@@ -61,7 +61,7 @@ static __inline unsigned char __DEFAULT_FN_ATTRS
_addcarry_u64(unsigned char __cf, unsigned long long __x,
unsigned long long __y, unsigned long long *__p)
{
- return __builtin_ia32_addcarry_u64(__cf, __x, __y, __p);
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
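
The adxintrin.h fix maps _addcarry_u32/_addcarry_u64 back onto the addcarryx builtins. A usage sketch (assumes an x86 target; the limb arithmetic is illustrative, not from the diff):

#include <immintrin.h>

// Add two 64-bit values held as 32-bit limbs, propagating the carry.
void add64(unsigned int a_lo, unsigned int a_hi,
           unsigned int b_lo, unsigned int b_hi,
           unsigned int *lo, unsigned int *hi) {
  unsigned char c = _addcarry_u32(0, a_lo, b_lo, lo); // carry out of the low limb
  _addcarry_u32(c, a_hi, b_hi, hi);                   // carry feeds the high limb
}
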
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 90fd477d9b98..2dc6adb9009f 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -9492,49 +9492,51 @@ vec_splat_u32(signed char __a) {
/* vec_sr */
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
- vector unsigned char __res = (vector unsigned char)__a >> __b;
- return (vector signed char)__res;
-}
-
+// vec_sr does modulo arithmetic on __b first, so __b is allowed to be more
+// than the length of __a.
static __inline__ vector unsigned char __ATTRS_o_ai
vec_sr(vector unsigned char __a, vector unsigned char __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_sr(vector signed short __a, vector unsigned short __b) {
- vector unsigned short __res = (vector unsigned short)__a >> __b;
- return (vector signed short)__res;
+static __inline__ vector signed char __ATTRS_o_ai
+vec_sr(vector signed char __a, vector unsigned char __b) {
+ return (vector signed char)vec_sr((vector unsigned char)__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_sr(vector unsigned short __a, vector unsigned short __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sr(vector signed int __a, vector unsigned int __b) {
- vector unsigned int __res = (vector unsigned int)__a >> __b;
- return (vector signed int)__res;
+static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
+ vector unsigned short __b) {
+ return (vector short)vec_sr((vector unsigned short)__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_sr(vector unsigned int __a, vector unsigned int __b) {
- return __a >> __b;
+ return __a >>
+ (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
}
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sr(vector signed long long __a, vector unsigned long long __b) {
- vector unsigned long long __res = (vector unsigned long long)__a >> __b;
- return (vector signed long long)__res;
+static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
+ vector unsigned int __b) {
+ return (vector int)vec_sr((vector unsigned int)__a, __b);
}
+#ifdef __POWER8_VECTOR__
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
- return __a >> __b;
+ return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
+ __CHAR_BIT__));
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_sr(vector long long __a, vector unsigned long long __b) {
+ return (vector long long)vec_sr((vector unsigned long long)__a, __b);
}
#endif
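
The rewritten vec_sr overloads reduce the per-element shift count modulo the element width, as the new comment notes. A scalar C++ sketch of the same rule (illustrative only; not taken from altivec.h):

#include <climits>
#include <cstdint>

// An 8-bit element shifted by 9 behaves as if shifted by 9 % 8 == 1.
uint8_t sr_byte(uint8_t a, uint8_t count) {
  return a >> (count % (sizeof(uint8_t) * CHAR_BIT));
}
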
@@ -9544,12 +9546,12 @@ vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_vsrb(vector signed char __a, vector unsigned char __b) {
- return __a >> (vector signed char)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_vsrh */
@@ -9558,12 +9560,12 @@ vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector short __ATTRS_o_ai
vec_vsrh(vector short __a, vector unsigned short __b) {
- return __a >> (vector short)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_vsrw */
@@ -9572,12 +9574,12 @@ vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
vector unsigned int __b) {
- return __a >> (vector int)__b;
+ return vec_sr(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
- return __a >> __b;
+ return vec_sr(__a, __b);
}
/* vec_sra */
@@ -16353,67 +16355,82 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
+typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
+typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
+typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
+typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
+typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
+typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
+typedef vector float unaligned_vec_float __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
signed char *__ptr) {
- return *(vector signed char *)(__ptr + __offset);
+ return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
vec_xl(signed long long __offset, unsigned char *__ptr) {
- return *(vector unsigned char *)(__ptr + __offset);
+ return *(unaligned_vec_uchar*)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
signed short *__ptr) {
- return *(vector signed short *)(__ptr + __offset);
+ return *(unaligned_vec_sshort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned short
vec_xl(signed long long __offset, unsigned short *__ptr) {
- return *(vector unsigned short *)(__ptr + __offset);
+ return *(unaligned_vec_ushort *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
signed int *__ptr) {
- return *(vector signed int *)(__ptr + __offset);
+ return *(unaligned_vec_sint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
unsigned int *__ptr) {
- return *(vector unsigned int *)(__ptr + __offset);
+ return *(unaligned_vec_uint *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
float *__ptr) {
- return *(vector float *)(__ptr + __offset);
+ return *(unaligned_vec_float *)(__ptr + __offset);
}
#ifdef __VSX__
+typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
+typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
+typedef vector double unaligned_vec_double __attribute__((aligned(1)));
+
static inline __ATTRS_o_ai vector signed long long
vec_xl(signed long long __offset, signed long long *__ptr) {
- return *(vector signed long long *)(__ptr + __offset);
+ return *(unaligned_vec_sll *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned long long
vec_xl(signed long long __offset, unsigned long long *__ptr) {
- return *(vector unsigned long long *)(__ptr + __offset);
+ return *(unaligned_vec_ull *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
double *__ptr) {
- return *(vector double *)(__ptr + __offset);
+ return *(unaligned_vec_double *)(__ptr + __offset);
}
#endif
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
+typedef vector unsigned __int128 unaligned_vec_ui128
+ __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
vec_xl(signed long long __offset, signed __int128 *__ptr) {
- return *(vector signed __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_si128 *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned __int128
vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
- return *(vector unsigned __int128 *)(__ptr + __offset);
+ return *(unaligned_vec_ui128 *)(__ptr + __offset);
}
#endif
@@ -16498,62 +16515,62 @@ vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
signed long long __offset,
signed char *__ptr) {
- *(vector signed char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
signed long long __offset,
unsigned char *__ptr) {
- *(vector unsigned char *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
signed long long __offset,
signed short *__ptr) {
- *(vector signed short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
signed long long __offset,
unsigned short *__ptr) {
- *(vector unsigned short *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
signed long long __offset,
signed int *__ptr) {
- *(vector signed int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
signed long long __offset,
unsigned int *__ptr) {
- *(vector unsigned int *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector float __vec,
signed long long __offset,
float *__ptr) {
- *(vector float *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_float *)(__ptr + __offset) = __vec;
}
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
signed long long __offset,
signed long long *__ptr) {
- *(vector signed long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
signed long long __offset,
unsigned long long *__ptr) {
- *(vector unsigned long long *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector double __vec,
signed long long __offset,
double *__ptr) {
- *(vector double *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_double *)(__ptr + __offset) = __vec;
}
#endif
@@ -16561,13 +16578,13 @@ static inline __ATTRS_o_ai void vec_xst(vector double __vec,
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
- *(vector signed __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
signed long long __offset,
unsigned __int128 *__ptr) {
- *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+ *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
}
#endif
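
The vec_xl/vec_xst rework expresses unaligned access through typedefs carrying aligned(1) instead of casting to the naturally aligned vector type. A generic sketch of that trick (plain integers, no AltiVec types):

#include <cstdint>

// The aligned(1) typedef tells the compiler the pointee may be unaligned,
// so the dereference is lowered to an unaligned load.
typedef uint32_t unaligned_u32 __attribute__((aligned(1)));

uint32_t load_u32(const unsigned char *p) {
  return *(const unaligned_u32 *)p; // valid even if p is not 4-byte aligned
}
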
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index fc4632374977..a90a255376c0 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -32,7 +32,216 @@ typedef unsigned int __mmask32;
typedef unsigned long long __mmask64;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+
+static __inline __mmask32 __DEFAULT_FN_ATTRS
+_knot_mask32(__mmask32 __M)
+{
+ return __builtin_ia32_knotsi(__M);
+}
+
+static __inline __mmask64 __DEFAULT_FN_ATTRS
+_knot_mask64(__mmask64 __M)
+{
+ return __builtin_ia32_knotdi(__M);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kand_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kand_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kandn_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kandn_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kor_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kor_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kxnor_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kxnor_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kxor_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kxor_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B);
+ return (unsigned char)__builtin_ia32_kortestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
+ return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B);
+ return (unsigned char)__builtin_ia32_ktestzsi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
+ return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_kadd_mask32(__mmask32 __A, __mmask32 __B)
+{
+ return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_kadd_mask64(__mmask64 __A, __mmask64 __B)
+{
+ return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
+}
+
+#define _kshiftli_mask32(A, I) \
+ (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I))
+
+#define _kshiftri_mask32(A, I) \
+ (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I))
+
+#define _kshiftli_mask64(A, I) \
+ (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I))
+
+#define _kshiftri_mask64(A, I) \
+ (__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_cvtmask32_u32(__mmask32 __A) {
+ return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_cvtmask64_u64(__mmask64 __A) {
+ return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_cvtu32_mask32(unsigned int __A) {
+ return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_cvtu64_mask64(unsigned long long __A) {
+ return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A);
+}
+
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+_load_mask32(__mmask32 *__A) {
+ return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A);
+}
+
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+_load_mask64(__mmask64 *__A) {
+ return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_store_mask32(__mmask32 *__A, __mmask32 __B) {
+ *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_store_mask64(__mmask64 *__A, __mmask64 __B) {
+ *(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B);
+}
/* Integer compare */
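
The block above introduces the 32- and 64-bit k-register helpers (_knot/_kand/_kor/_kxor/_kortest/_ktest/_kadd/_kshift/_cvt/_load/_store) for AVX-512BW masks. A hedged usage sketch (assumes -mavx512bw; the predicate itself is illustrative):

#include <immintrin.h>

bool all_lanes_equal_and_nonzero(__m512i a, __m512i b) {
  __mmask32 eq = _mm512_cmpeq_epi16_mask(a, b);                       // a[i] == b[i]
  __mmask32 nz = _mm512_cmpneq_epi16_mask(a, _mm512_setzero_si512()); // a[i] != 0
  __mmask32 both = _kand_mask32(eq, nz);
  return _cvtmask32_u32(both) == 0xFFFFFFFFu; // every 16-bit lane passed both checks
}
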
@@ -176,102 +385,102 @@ typedef unsigned long long __mmask64;
#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \
_mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi8 (__m512i __A, __m512i __B) {
return (__m512i) ((__v64qu) __A + (__v64qu) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_add_epi8(__A, __B),
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_add_epi8(__A, __B),
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sub_epi8 (__m512i __A, __m512i __B) {
return (__m512i) ((__v64qu) __A - (__v64qu) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_sub_epi8(__A, __B),
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_sub_epi8(__A, __B),
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_add_epi16 (__m512i __A, __m512i __B) {
return (__m512i) ((__v32hu) __A + (__v32hu) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_add_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_add_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sub_epi16 (__m512i __A, __m512i __B) {
return (__m512i) ((__v32hu) __A - (__v32hu) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sub_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_sub_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mullo_epi16 (__m512i __A, __m512i __B) {
return (__m512i) ((__v32hu) __A * (__v32hu) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mullo_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mullo_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
{
return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
@@ -279,7 +488,7 @@ _mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
(__v64qi) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
{
return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
@@ -287,13 +496,13 @@ _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
(__v32hi) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi8 (__m512i __A)
{
return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
@@ -301,7 +510,7 @@ _mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
@@ -309,13 +518,13 @@ _mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi16 (__m512i __A)
{
return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -323,7 +532,7 @@ _mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -331,13 +540,13 @@ _mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_packs_epi32(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -345,7 +554,7 @@ _mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -353,13 +562,13 @@ _mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_packs_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -367,7 +576,7 @@ _mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -375,13 +584,13 @@ _mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_packus_epi32(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -389,7 +598,7 @@ _mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -397,13 +606,13 @@ _mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_packus_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -411,7 +620,7 @@ _mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -419,119 +628,95 @@ _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_adds_epi8(__A, __B),
+ (__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_adds_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
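The rewritten saturating adds above follow the pattern used for the other element-wise ops in this header: compute the unmasked result, then blend it against the merge source (or zero) with the 512-bit select builtin. A minimal sketch of the per-lane behaviour that blend is assumed to have, in plain C (function name and loop form are illustrative only, not the builtin's implementation):

/* Sketch: assumed per-byte semantics of __builtin_ia32_selectb_512. */
static inline void selectb_512_sketch(unsigned long long mask,
                                      const signed char op[64],
                                      const signed char src[64],
                                      signed char dst[64])
{
  for (int i = 0; i < 64; ++i)
    dst[i] = ((mask >> i) & 1) ? op[i] : src[i]; /* take the op lane when the mask bit is set */
}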
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_adds_epi16(__A, __B),
+ (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_adds_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_adds_epu8(__A, __B),
+ (__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_adds_epu8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_adds_epu16(__A, __B),
+ (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_adds_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_avg_epu8 (__m512i __A, __m512i __B)
{
typedef unsigned short __v64hu __attribute__((__vector_size__(128)));
@@ -541,7 +726,7 @@ _mm512_avg_epu8 (__m512i __A, __m512i __B)
>> 1, __v64qu);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
__m512i __B)
{
@@ -550,7 +735,7 @@ _mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
@@ -558,7 +743,7 @@ _mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_avg_epu16 (__m512i __A, __m512i __B)
{
typedef unsigned int __v32su __attribute__((__vector_size__(128)));
@@ -568,7 +753,7 @@ _mm512_avg_epu16 (__m512i __A, __m512i __B)
>> 1, __v32hu);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
__m512i __B)
{
@@ -577,7 +762,7 @@ _mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -585,13 +770,13 @@ _mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi8 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -599,7 +784,7 @@ _mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -607,13 +792,13 @@ _mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi16 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -621,7 +806,7 @@ _mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
__m512i __B)
{
@@ -630,13 +815,13 @@ _mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu8 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -644,7 +829,7 @@ _mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -652,13 +837,13 @@ _mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu16 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -666,7 +851,7 @@ _mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -674,13 +859,13 @@ _mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi8 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -688,7 +873,7 @@ _mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -696,13 +881,13 @@ _mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi16 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -710,7 +895,7 @@ _mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -718,13 +903,13 @@ _mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu8 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -732,7 +917,7 @@ _mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
@@ -740,13 +925,13 @@ _mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu16 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -754,7 +939,7 @@ _mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
@@ -762,13 +947,13 @@ _mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_shuffle_epi8(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
@@ -776,7 +961,7 @@ _mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
@@ -784,126 +969,102 @@ _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_subs_epi8(__A, __B),
+ (__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_subs_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_subs_epi16(__A, __B),
+ (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubsw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_subs_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_subs_epu8(__A, __B),
+ (__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_si512(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_subs_epu8(__A, __B),
+ (__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_subs_epu16(__A, __B),
+ (__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubusw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_si512(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_subs_epu16(__A, __B),
+ (__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B)
{
return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
(__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
__m512i __B)
{
@@ -912,7 +1073,7 @@ _mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I,
(__v32hi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
__m512i __B)
{
@@ -921,7 +1082,7 @@ _mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U,
(__v32hi)__I);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
__m512i __B)
{
@@ -930,13 +1091,13 @@ _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -944,7 +1105,7 @@ _mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -952,13 +1113,13 @@ _mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mulhi_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmulhw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
__m512i __B)
{
@@ -967,7 +1128,7 @@ _mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A,
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -975,13 +1136,13 @@ _mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mulhi_epu16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -989,7 +1150,7 @@ _mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -997,12 +1158,12 @@ _mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maddubs_epi16(__m512i __X, __m512i __Y) {
return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
__m512i __Y) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
@@ -1010,114 +1171,114 @@ _mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X,
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U,
(__v32hi)_mm512_maddubs_epi16(__X, __Y),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_madd_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_madd_epi16(__A, __B),
(__v16si)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
(__v16si)_mm512_madd_epi16(__A, __B),
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtsepi16_epi8 (__m512i __A) {
return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
(__v32qi)_mm256_setzero_si256(),
(__mmask32) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
(__v32qi)__O,
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtusepi16_epi8 (__m512i __A) {
return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
(__mmask32) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
(__v32qi) __O,
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtepi16_epi8 (__m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) _mm256_undefined_si256(),
(__mmask32) -1);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) __O,
__M);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) {
return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A,
(__v32qi) _mm256_setzero_si256(),
__M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
__builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
__builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A)
{
__builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
8, 64+8, 9, 64+9,
@@ -1138,21 +1299,21 @@ _mm512_unpackhi_epi8(__m512i __A, __m512i __B) {
62, 64+62, 63, 64+63);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpackhi_epi8(__A, __B),
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpackhi_epi8(__A, __B),
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
4, 32+4, 5, 32+5,
@@ -1165,21 +1326,21 @@ _mm512_unpackhi_epi16(__m512i __A, __m512i __B) {
30, 32+30, 31, 32+31);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpackhi_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpackhi_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B,
0, 64+0, 1, 64+1,
@@ -1200,21 +1361,21 @@ _mm512_unpacklo_epi8(__m512i __A, __m512i __B) {
54, 64+54, 55, 64+55);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpacklo_epi8(__A, __B),
(__v64qi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
(__v64qi)_mm512_unpacklo_epi8(__A, __B),
(__v64qi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B,
0, 32+0, 1, 32+1,
@@ -1227,21 +1388,21 @@ _mm512_unpacklo_epi16(__m512i __A, __m512i __B) {
26, 32+26, 27, 32+27);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpacklo_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_unpacklo_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepi8_epi16(__m256i __A)
{
/* This function always performs a signed extension, but __v32qi is a char
@@ -1249,7 +1410,7 @@ _mm512_cvtepi8_epi16(__m256i __A)
return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1257,7 +1418,7 @@ _mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1265,13 +1426,13 @@ _mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtepu8_epi16(__m256i __A)
{
return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1279,7 +1440,7 @@ _mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1320,13 +1481,13 @@ _mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
(imm)), \
(__v32hi)_mm512_setzero_si512())
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sllv_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1334,7 +1495,7 @@ _mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1342,13 +1503,13 @@ _mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sll_epi16(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1356,7 +1517,7 @@ _mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1364,13 +1525,13 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi16(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1378,7 +1539,7 @@ _mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1389,13 +1550,13 @@ _mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
#define _mm512_bslli_epi128(a, imm) \
(__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srlv_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1403,7 +1564,7 @@ _mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1411,13 +1572,13 @@ _mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srav_epi16(__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1425,7 +1586,7 @@ _mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1433,13 +1594,13 @@ _mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sra_epi16(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1447,7 +1608,7 @@ _mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1455,13 +1616,13 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi16(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1469,7 +1630,7 @@ _mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1477,13 +1638,13 @@ _mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srl_epi16(__m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1491,7 +1652,7 @@ _mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1499,13 +1660,13 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi16(__m512i __A, int __B)
{
return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1513,7 +1674,7 @@ _mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
@@ -1524,7 +1685,7 @@ _mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
#define _mm512_bsrli_epi128(a, imm) \
(__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
@@ -1532,7 +1693,7 @@ _mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
(__v32hi) __W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
@@ -1540,7 +1701,7 @@ _mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
(__v32hi) _mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
@@ -1548,7 +1709,7 @@ _mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
(__v64qi) __W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
{
return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
@@ -1556,7 +1717,7 @@ _mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
(__v64qi) _mm512_setzero_si512 ());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
{
return (__m512i) __builtin_ia32_selectb_512(__M,
@@ -1564,7 +1725,7 @@ _mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
(__v64qi) __O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
{
return (__m512i) __builtin_ia32_selectb_512(__M,
@@ -1572,21 +1733,30 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
(__v64qi) _mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
{
return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
(__mmask64) __B);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_kunpackw (__mmask32 __A, __mmask32 __B)
{
return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
(__mmask32) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
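The new unmasked load above uses a packed, __may_alias__ wrapper struct so the dereference is safe for arbitrary alignment and does not break strict aliasing. A hedged usage sketch (helper name and the -mavx512bw build flag are assumptions, not from the header):

#include <immintrin.h>

/* Load 32 contiguous 16-bit values from a pointer with no alignment guarantee.
   Assumes the translation unit is built with -mavx512bw. */
static __m512i load_unaligned_words(const short *p)
{
  return _mm512_loadu_epi16(p);
}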
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
@@ -1594,7 +1764,7 @@ _mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P)
(__mmask32) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P,
@@ -1603,7 +1773,16 @@ _mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P)
(__mmask32) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
@@ -1611,7 +1790,7 @@ _mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P)
(__mmask64) __U);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P,
@@ -1619,7 +1798,17 @@ _mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P)
_mm512_setzero_si512 (),
(__mmask64) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+
+static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi16 (void *__P, __m512i __A)
+{
+ struct __storeu_epi16 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
{
__builtin_ia32_storedquhi512_mask ((__v32hi *) __P,
@@ -1627,7 +1816,16 @@ _mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A)
(__mmask32) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS
+static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi8 (void *__P, __m512i __A)
+{
+ struct __storeu_epi8 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
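The unmasked store added above mirrors the loads: the same packed, __may_alias__ struct idiom yields an unaligned 512-bit store. A small round-trip sketch under the same assumptions (hypothetical helper, built with -mavx512bw):

#include <immintrin.h>

/* Copy 64 bytes between two arbitrarily aligned buffers through one zmm register. */
static void copy64_unaligned(void *dst, const void *src)
{
  __m512i v = _mm512_loadu_epi8(src);  /* unmasked unaligned load added above */
  _mm512_storeu_epi8(dst, v);          /* unmasked unaligned store added here */
}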
+static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
{
__builtin_ia32_storedquqi512_mask ((__v64qi *) __P,
@@ -1635,86 +1833,86 @@ _mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A)
(__mmask64) __U);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_test_epi8_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_test_epi16_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_testn_epi8_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_testn_epi16_mask (__m512i __A, __m512i __B)
{
return _mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B),
_mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS512
_mm512_movepi8_mask (__m512i __A)
{
return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A);
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS512
_mm512_movepi16_mask (__m512i __A)
{
return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi8 (__mmask64 __A)
{
return (__m512i) __builtin_ia32_cvtmask2b512 (__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi16 (__mmask32 __A)
{
return (__m512i) __builtin_ia32_cvtmask2w512 (__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastb_epi8 (__m128i __A)
{
return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A,
@@ -1724,7 +1922,7 @@ _mm512_broadcastb_epi8 (__m128i __A)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectb_512(__M,
@@ -1732,7 +1930,7 @@ _mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A)
(__v64qi) __O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectb_512(__M,
@@ -1740,7 +1938,7 @@ _mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A)
(__v64qi) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
{
return (__m512i) __builtin_ia32_selectw_512(__M,
@@ -1748,7 +1946,7 @@ _mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A)
(__v32hi) __O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
{
return (__m512i) __builtin_ia32_selectw_512(__M,
@@ -1756,7 +1954,7 @@ _mm512_maskz_set1_epi16 (__mmask32 __M, short __A)
(__v32hi) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcastw_epi16 (__m128i __A)
{
return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A,
@@ -1764,7 +1962,7 @@ _mm512_broadcastw_epi16 (__m128i __A)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectw_512(__M,
@@ -1772,7 +1970,7 @@ _mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A)
(__v32hi) __O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectw_512(__M,
@@ -1780,13 +1978,13 @@ _mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A)
(__v32hi) _mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_permutexvar_epi16 (__m512i __A, __m512i __B)
{
return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
__m512i __B)
{
@@ -1795,7 +1993,7 @@ _mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
__m512i __B)
{
@@ -1832,15 +2030,14 @@ _mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
(__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
(__v32hi)_mm512_setzero_si512())
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sad_epu8 (__m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
(__v64qi) __B);
}
-
-
+#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
#endif
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index 8a00b3afa9d5..6e6c293af22e 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -29,180 +29,309 @@
#define __AVX512DQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_knot_mask8(__mmask8 __M)
+{
+ return __builtin_ia32_knotqi(__M);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kand_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kandn_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kor_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kxnor_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kxor_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
+ return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
+ return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestchi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_ktestchi(__A, __B);
+ return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kadd_mask8(__mmask8 __A, __mmask8 __B)
+{
+ return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_kadd_mask16(__mmask16 __A, __mmask16 __B)
+{
+ return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B);
+}
+
+#define _kshiftli_mask8(A, I) \
+ (__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I))
+
+#define _kshiftri_mask8(A, I) \
+ (__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_cvtmask8_u32(__mmask8 __A) {
+ return (unsigned int)__builtin_ia32_kmovb((__mmask8)__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_cvtu32_mask8(unsigned int __A) {
+ return (__mmask8)__builtin_ia32_kmovb((__mmask8)__A);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_load_mask8(__mmask8 *__A) {
+ return (__mmask8)__builtin_ia32_kmovb(*(__mmask8 *)__A);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_store_mask8(__mmask8 *__A, __mmask8 __B) {
+ *(__mmask8 *)__A = __builtin_ia32_kmovb((__mmask8)__B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
return (__m512i) ((__v8du) __A * (__v8du) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_mullo_epi64(__A, __B),
(__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
(__v8di)_mm512_mullo_epi64(__A, __B),
(__v8di)_mm512_setzero_si512());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_xor_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A ^ (__v8du)__B);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_xor_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_xor_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_xor_ps (__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A ^ (__v16su)__B);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_xor_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_xor_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_or_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A | (__v8du)__B);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_or_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_or_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_or_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A | (__v16su)__B);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_or_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_or_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_and_pd(__m512d __A, __m512d __B) {
return (__m512d)((__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_and_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_and_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_and_ps(__m512 __A, __m512 __B) {
return (__m512)((__v16su)__A & (__v16su)__B);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_and_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_and_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_andnot_pd(__m512d __A, __m512d __B) {
return (__m512d)(~(__v8du)__A & (__v8du)__B);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_andnot_pd(__A, __B),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_andnot_pd(__A, __B),
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_andnot_ps(__m512 __A, __m512 __B) {
return (__m512)(~(__v16su)__A & (__v16su)__B);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_andnot_ps(__A, __B),
(__v16sf)__W);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) {
return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
(__v16sf)_mm512_andnot_ps(__A, __B),
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epi64 (__m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -210,7 +339,7 @@ _mm512_cvtpd_epi64 (__m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
(__v8di) __W,
@@ -218,7 +347,7 @@ _mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -241,7 +370,7 @@ _mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epu64 (__m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -249,7 +378,7 @@ _mm512_cvtpd_epu64 (__m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
(__v8di) __W,
@@ -257,7 +386,7 @@ _mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -280,7 +409,7 @@ _mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epi64 (__m256 __A) {
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -288,7 +417,7 @@ _mm512_cvtps_epi64 (__m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -296,7 +425,7 @@ _mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -319,7 +448,7 @@ _mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epu64 (__m256 __A) {
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -327,7 +456,7 @@ _mm512_cvtps_epu64 (__m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -335,7 +464,7 @@ _mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -359,19 +488,19 @@ _mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
(__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_pd (__m512i __A) {
return (__m512d)__builtin_convertvector((__v8di)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtepi64_pd(__A),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtepi64_pd(__A),
@@ -393,7 +522,7 @@ _mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
(__v8df)_mm512_setzero_pd(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_ps (__m512i __A) {
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
(__v8sf) _mm256_setzero_ps(),
@@ -401,7 +530,7 @@ _mm512_cvtepi64_ps (__m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
(__v8sf) __W,
@@ -409,7 +538,7 @@ _mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
(__v8sf) _mm256_setzero_ps(),
@@ -433,7 +562,7 @@ _mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epi64 (__m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -441,7 +570,7 @@ _mm512_cvttpd_epi64 (__m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
(__v8di) __W,
@@ -449,7 +578,7 @@ _mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -472,7 +601,7 @@ _mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epu64 (__m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -480,7 +609,7 @@ _mm512_cvttpd_epu64 (__m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
(__v8di) __W,
@@ -488,7 +617,7 @@ _mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
(__v8di) _mm512_setzero_si512(),
@@ -511,7 +640,7 @@ _mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epi64 (__m256 __A) {
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -519,7 +648,7 @@ _mm512_cvttps_epi64 (__m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -527,7 +656,7 @@ _mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -550,7 +679,7 @@ _mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epu64 (__m256 __A) {
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -558,7 +687,7 @@ _mm512_cvttps_epu64 (__m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
(__v8di) __W,
@@ -566,7 +695,7 @@ _mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
(__v8di) _mm512_setzero_si512(),
@@ -589,19 +718,19 @@ _mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U), (int)(R))
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepu64_pd (__m512i __A) {
return (__m512d)__builtin_convertvector((__v8du)__A, __v8df);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtepu64_pd(__A),
(__v8df)__W);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
(__v8df)_mm512_cvtepu64_pd(__A),
@@ -625,7 +754,7 @@ _mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
(__mmask8)(U), (int)(R))
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtepu64_ps (__m512i __A) {
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
(__v8sf) _mm256_setzero_ps(),
@@ -633,7 +762,7 @@ _mm512_cvtepu64_ps (__m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
(__v8sf) __W,
@@ -641,7 +770,7 @@ _mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
_MM_FROUND_CUR_DIRECTION);
}
-static __inline__ __m256 __DEFAULT_FN_ATTRS
+static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
(__v8sf) _mm256_setzero_ps(),
@@ -935,32 +1064,32 @@ _mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U), (int)(C), (int)(R))
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_movepi32_mask (__m512i __A)
{
return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi32 (__mmask16 __A)
{
return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi64 (__mmask8 __A)
{
return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
}
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
_mm512_movepi64_mask (__m512i __A)
{
return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x2 (__m128 __A)
{
return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
@@ -968,7 +1097,7 @@ _mm512_broadcast_f32x2 (__m128 __A)
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -976,7 +1105,7 @@ _mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
(__v16sf)__O);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -984,7 +1113,7 @@ _mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x8(__m256 __A)
{
return (__m512)__builtin_shufflevector((__v8sf)__A, (__v8sf)__A,
@@ -992,7 +1121,7 @@ _mm512_broadcast_f32x8(__m256 __A)
0, 1, 2, 3, 4, 5, 6, 7);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -1000,7 +1129,7 @@ _mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A)
(__v16sf)__O);
}
-static __inline__ __m512 __DEFAULT_FN_ATTRS
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A)
{
return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
@@ -1008,14 +1137,14 @@ _mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A)
(__v16sf)_mm512_setzero_ps());
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_broadcast_f64x2(__m128d __A)
{
return (__m512d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
@@ -1023,7 +1152,7 @@ _mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A)
(__v8df)__O);
}
-static __inline__ __m512d __DEFAULT_FN_ATTRS
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A)
{
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
@@ -1031,7 +1160,7 @@ _mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A)
(__v8df)_mm512_setzero_pd());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i32x2 (__m128i __A)
{
return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
@@ -1039,7 +1168,7 @@ _mm512_broadcast_i32x2 (__m128i __A)
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1047,7 +1176,7 @@ _mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
(__v16si)__O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1055,7 +1184,7 @@ _mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i32x8(__m256i __A)
{
return (__m512i)__builtin_shufflevector((__v8si)__A, (__v8si)__A,
@@ -1063,7 +1192,7 @@ _mm512_broadcast_i32x8(__m256i __A)
0, 1, 2, 3, 4, 5, 6, 7);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1071,7 +1200,7 @@ _mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A)
(__v16si)__O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A)
{
return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
@@ -1079,14 +1208,14 @@ _mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A)
(__v16si)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i64x2(__m128i __A)
{
return (__m512i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
0, 1, 0, 1, 0, 1, 0, 1);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1094,7 +1223,7 @@ _mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A)
(__v8di)__O);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
{
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
@@ -1256,6 +1385,7 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
(__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
(__mmask8)(U))
+#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
#endif
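
A small usage sketch (illustrative only, assuming -mavx512dq) exercising the new 8-bit mask helpers added above:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __mmask8 a = _cvtu32_mask8(0x0F);
  __mmask8 b = _knot_mask8(a);                            /* 0xF0 */
  __mmask8 c = _kor_mask8(a, b);                          /* 0xFF */
  unsigned char or_is_ones = _kortestc_mask8_u8(a, b);    /* 1: a | b == 0xFF */
  unsigned char or_is_zero = _kortestz_mask8_u8(a, b);    /* 0: a | b != 0 */
  _store_mask8(&c, _kshiftli_mask8(c, 4));                /* c = 0xF0 */
  printf("%#x %u %u\n", _cvtmask8_u32(c), or_is_ones, or_is_zero);
  return 0;
}
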
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 8dd4a0a40eea..1c19993ff1bb 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -175,6 +175,7 @@ typedef enum
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
/* Create vectors with repeated elements */
@@ -508,13 +509,13 @@ _mm512_castsi512_si256 (__m512i __A)
return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_int2mask(int __a)
{
return (__mmask16)__a;
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS
_mm512_mask2int(__mmask16 __a)
{
return (int)__a;
@@ -4329,6 +4330,15 @@ _mm512_loadu_si512 (void const *__P)
}
static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
@@ -4347,6 +4357,15 @@ _mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
}
static __inline __m512i __DEFAULT_FN_ATTRS512
+_mm512_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
+static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
{
return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
@@ -4482,6 +4501,15 @@ _mm512_load_epi64 (void const *__P)
/* SIMD store ops */
static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi64 (void *__P, __m512i __A)
+{
+ struct __storeu_epi64 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
{
__builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
@@ -4498,6 +4526,15 @@ _mm512_storeu_si512 (void *__P, __m512i __A)
}
static __inline void __DEFAULT_FN_ATTRS512
+_mm512_storeu_epi32 (void *__P, __m512i __A)
+{
+ struct __storeu_epi32 {
+ __m512i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
+static __inline void __DEFAULT_FN_ATTRS512
_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
{
__builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
@@ -4580,7 +4617,7 @@ _mm512_store_epi64 (void *__P, __m512i __A)
/* Mask ops */
-static __inline __mmask16 __DEFAULT_FN_ATTRS512
+static __inline __mmask16 __DEFAULT_FN_ATTRS
_mm512_knot(__mmask16 __M)
{
return __builtin_ia32_knothi(__M);
@@ -5622,7 +5659,7 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U), (int)(R))
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kmov (__mmask16 __A)
{
return __A;
@@ -7593,177 +7630,177 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
#define _mm512_i64gather_ps(index, addr, scale) \
(__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
(int)(scale))
#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
- (float const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i64gather_epi32(index, addr, scale) \
(__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), \
(__mmask8)-1, (int)(scale))
#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i64gather_pd(index, addr, scale) \
(__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
(int)(scale))
#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i64gather_epi64(index, addr, scale) \
(__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), (__mmask8)-1, \
(int)(scale))
#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v8di)(__m512i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i32gather_ps(index, addr, scale) \
(__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v16sf)(__m512)(index), \
(__mmask16)-1, (int)(scale))
#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v16sf)(__m512)(index), \
(__mmask16)(mask), (int)(scale))
#define _mm512_i32gather_epi32(index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v16si)(__m512i)(index), \
(__mmask16)-1, (int)(scale))
#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v16si)(__m512i)(index), \
(__mmask16)(mask), (int)(scale))
#define _mm512_i32gather_pd(index, addr, scale) \
(__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), (__mmask8)-1, \
(int)(scale))
#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i32gather_epi64(index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), (__mmask8)-1, \
(int)(scale))
#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm512_i64scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
(__v8sf)(__m256)(v1), (int)(scale))
#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv16sf((float *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
(__v8sf)(__m256)(v1), (int)(scale))
#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
(__v8si)(__m256i)(v1), (int)(scale))
#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv16si((int *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
(__v8si)(__m256i)(v1), (int)(scale))
#define _mm512_i64scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
(__v8df)(__m512d)(v1), (int)(scale))
#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv8df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
(__v8df)(__m512d)(v1), (int)(scale))
#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \
(__v8di)(__m512i)(index), \
(__v8di)(__m512i)(v1), (int)(scale))
#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv8di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \
(__v8di)(__m512i)(index), \
(__v8di)(__m512i)(v1), (int)(scale))
#define _mm512_i32scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)-1, \
+ __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \
(__v16si)(__m512i)(index), \
(__v16sf)(__m512)(v1), (int)(scale))
#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv16sf((float *)(addr), (__mmask16)(mask), \
+ __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \
(__v16si)(__m512i)(index), \
(__v16sf)(__m512)(v1), (int)(scale))
#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)-1, \
+ __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \
(__v16si)(__m512i)(index), \
(__v16si)(__m512i)(v1), (int)(scale))
#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv16si((int *)(addr), (__mmask16)(mask), \
+ __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \
(__v16si)(__m512i)(index), \
(__v16si)(__m512i)(v1), (int)(scale))
#define _mm512_i32scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), \
(__v8df)(__m512d)(v1), (int)(scale))
#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv8df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), \
(__v8df)(__m512d)(v1), (int)(scale))
#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), \
(__v8di)(__m512i)(v1), (int)(scale))
#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv8di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), \
(__v8di)(__m512i)(v1), (int)(scale))
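
The cast change above is invisible at call sites; a sketch (illustrative, -mavx512f assumed) of the gather/scatter macros in use:

#include <immintrin.h>

/* v[i] = src[idx[i]], then dst[idx[i]] = v[i]; scale 4 = sizeof(float). */
void gather_then_scatter(float *dst, const float *src, __m512i idx) {
  __m512 v = _mm512_i32gather_ps(idx, src, 4);
  _mm512_i32scatter_ps(dst, idx, v, 4);
}
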
@@ -8320,54 +8357,105 @@ _mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kand (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kandn (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS
_mm512_kortestc (__mmask16 __A, __mmask16 __B)
{
return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ int __DEFAULT_FN_ATTRS512
+static __inline__ int __DEFAULT_FN_ATTRS
_mm512_kortestz (__mmask16 __A, __mmask16 __B)
{
return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestchi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+ return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
+ *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
+ return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kxnor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kxor (__mmask16 __A, __mmask16 __B)
{
return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
}
+#define _kand_mask16 _mm512_kand
+#define _kandn_mask16 _mm512_kandn
+#define _knot_mask16 _mm512_knot
+#define _kor_mask16 _mm512_kor
+#define _kxnor_mask16 _mm512_kxnor
+#define _kxor_mask16 _mm512_kxor
+
+#define _kshiftli_mask16(A, I) \
+ (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
+
+#define _kshiftri_mask16(A, I) \
+ (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_cvtmask16_u32(__mmask16 __A) {
+ return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_cvtu32_mask16(unsigned int __A) {
+ return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_load_mask16(__mmask16 *__A) {
+ return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_store_mask16(__mmask16 *__A, __mmask16 __B) {
+ *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B);
+}
+
static __inline__ void __DEFAULT_FN_ATTRS512
_mm512_stream_si512 (__m512i * __P, __m512i __A)
{
@@ -9594,5 +9682,6 @@ _mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS
#endif /* __AVX512FINTRIN_H */
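
A short sketch (illustrative, -mavx512f assumed) of the new unaligned whole-register load/store helpers added above; the packed, may_alias wrapper struct lets them accept any byte address:

#include <immintrin.h>

/* Copies 16 ints through a 512-bit register; neither pointer needs alignment. */
void copy16(int *dst, const int *src) {
  __m512i v = _mm512_loadu_epi32(src);
  _mm512_storeu_epi32(dst, v);
}
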
diff --git a/lib/Headers/avx512pfintrin.h b/lib/Headers/avx512pfintrin.h
index 5b8260b77c63..73b2234fb410 100644
--- a/lib/Headers/avx512pfintrin.h
+++ b/lib/Headers/avx512pfintrin.h
@@ -33,78 +33,78 @@
#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (long long const *)(addr), (int)(scale), \
+ (void const *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \
__builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \
- (long long const *)(addr), (int)(scale), \
+ (void const *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (int const *)(addr), \
+ (__v16si)(__m512i)(index), (void const *)(addr), \
(int)(scale), (int)(hint))
#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \
__builtin_ia32_gatherpfdps((__mmask16) -1, \
- (__v16si)(__m512i)(index), (int const *)(addr), \
+ (__v16si)(__m512i)(index), (void const *)(addr), \
(int)(scale), (int)(hint))
#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (long long const *)(addr), (int)(scale), \
+ (void const *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \
__builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \
- (long long const *)(addr), (int)(scale), \
+ (void const *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (int const *)(addr), (int)(scale), (int)(hint))
+ (void const *)(addr), (int)(scale), (int)(hint))
#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \
__builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \
- (int const *)(addr), (int)(scale), (int)(hint))
+ (void const *)(addr), (int)(scale), (int)(hint))
#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \
__builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \
- (long long *)(addr), (int)(scale), \
+ (void *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
- (long long *)(addr), (int)(scale), \
+ (void *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \
__builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint))
+ (void *)(addr), (int)(scale), (int)(hint))
#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfdps((__mmask16)(mask), \
- (__v16si)(__m512i)(index), (int *)(addr), \
+ (__v16si)(__m512i)(index), (void *)(addr), \
(int)(scale), (int)(hint))
#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \
__builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \
- (long long *)(addr), (int)(scale), \
+ (void *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (long long *)(addr), (int)(scale), \
+ (void *)(addr), (int)(scale), \
(int)(hint))
#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \
__builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint))
+ (void *)(addr), (int)(scale), (int)(hint))
#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \
__builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
- (int *)(addr), (int)(scale), (int)(hint))
+ (void *)(addr), (int)(scale), (int)(hint))
#undef __DEFAULT_FN_ATTRS
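
Call sites are unchanged by the void-pointer casts; a sketch (illustrative, assuming an AVX-512PF target such as Knights Landing and -mavx512pf) of one of the prefetch macros above:

#include <immintrin.h>

/* Prefetches the floats at base + 4 * idx[i] into L1 ahead of a later gather. */
void warm_gather(const float *base, __m512i idx) {
  _mm512_prefetch_i32gather_ps(idx, base, 4, _MM_HINT_T0);
}
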
diff --git a/lib/Headers/avx512vbmi2intrin.h b/lib/Headers/avx512vbmi2intrin.h
index d2a58094fd07..53242524293f 100644
--- a/lib/Headers/avx512vbmi2intrin.h
+++ b/lib/Headers/avx512vbmi2intrin.h
@@ -227,167 +227,141 @@ _mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
(__v32hi)_mm512_setzero_si512())
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shldv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B)
+_mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvq512_mask ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshldvq512((__v8di)__A, (__v8di)__B,
+ (__v8di)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shldv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvq512_maskz ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_shldv_epi64(__A, __B, __C),
+ (__v8di)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shldv_epi64(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvq512_mask ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_shldv_epi64(__A, __B, __C),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shldv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
+_mm512_shldv_epi32(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshldvd512((__v16si)__A, (__v16si)__B,
+ (__v16si)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shldv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvd512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- __U);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_shldv_epi32(__A, __B, __C),
+ (__v16si)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shldv_epi32(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_shldv_epi32(__A, __B, __C),
+ (__v16si)_mm512_setzero_si512());
}
-
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shldv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B)
+_mm512_shldv_epi16(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvw512_mask ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshldvw512((__v32hi)__A, (__v32hi)__B,
+ (__v32hi)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shldv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvw512_maskz ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- __U);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_shldv_epi16(__A, __B, __C),
+ (__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shldv_epi16(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshldvw512_mask ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_shldv_epi16(__A, __B, __C),
+ (__v32hi)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shrdv_epi64(__m512i __S, __mmask8 __U, __m512i __A, __m512i __B)
+_mm512_shrdv_epi64(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvq512_mask ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshrdvq512((__v8di)__A, (__v8di)__B,
+ (__v8di)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shrdv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvq512_maskz ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- __U);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_shrdv_epi64(__A, __B, __C),
+ (__v8di)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shrdv_epi64(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvq512_mask ((__v8di) __S,
- (__v8di) __A,
- (__v8di) __B,
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_selectq_512(__U,
+ (__v8di)_mm512_shrdv_epi64(__A, __B, __C),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shrdv_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B)
+_mm512_shrdv_epi32(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshrdvd512((__v16si)__A, (__v16si)__B,
+ (__v16si)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shrdv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvd512_maskz ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- __U);
+ return (__m512i) __builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_shrdv_epi32(__A, __B, __C),
+ (__v16si)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shrdv_epi32(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvd512_mask ((__v16si) __S,
- (__v16si) __A,
- (__v16si) __B,
- (__mmask16) -1);
+ return (__m512i) __builtin_ia32_selectd_512(__U,
+ (__v16si)_mm512_shrdv_epi32(__A, __B, __C),
+ (__v16si)_mm512_setzero_si512());
}
-
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shrdv_epi16(__m512i __S, __mmask32 __U, __m512i __A, __m512i __B)
+_mm512_shrdv_epi16(__m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvw512_mask ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- __U);
+ return (__m512i)__builtin_ia32_vpshrdvw512((__v32hi)__A, (__v32hi)__B,
+ (__v32hi)__C);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __S, __m512i __A, __m512i __B)
+_mm512_mask_shrdv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvw512_maskz ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- __U);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_shrdv_epi16(__A, __B, __C),
+ (__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shrdv_epi16(__m512i __S, __m512i __A, __m512i __B)
+_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C)
{
- return (__m512i) __builtin_ia32_vpshrdvw512_mask ((__v32hi) __S,
- (__v32hi) __A,
- (__v32hi) __B,
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_selectw_512(__U,
+ (__v32hi)_mm512_shrdv_epi16(__A, __B, __C),
+ (__v32hi)_mm512_setzero_si512());
}
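
The refactor keeps the public semantics: shldv concatenates each lane of the first two operands and left-shifts by the third, keeping the upper half. A worked sketch (illustrative, -mavx512vbmi2 assumed):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i a = _mm512_set1_epi64(0x1);
  __m512i b = _mm512_set1_epi64((long long)0x8000000000000000ULL);
  __m512i c = _mm512_set1_epi64(4);
  long long out[8];
  _mm512_storeu_si512(out, _mm512_shldv_epi64(a, b, c));
  /* Each lane: upper 64 bits of ((a:b) << 4) = (1 << 4) | (b >> 60) = 0x18. */
  printf("%#llx\n", (unsigned long long)out[0]);
  return 0;
}
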
diff --git a/lib/Headers/avx512vbmiintrin.h b/lib/Headers/avx512vbmiintrin.h
index b6e93c285871..5463d9015504 100644
--- a/lib/Headers/avx512vbmiintrin.h
+++ b/lib/Headers/avx512vbmiintrin.h
@@ -91,30 +91,26 @@ _mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A,
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_multishift_epi64_epi8 (__m512i __W, __mmask64 __M, __m512i __X, __m512i __Y)
+_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v64qi) __W,
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_vpmultishiftqb512((__v64qi)__X, (__v64qi) __Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_multishift_epi64_epi8 (__mmask64 __M, __m512i __X, __m512i __Y)
+_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X,
+ __m512i __Y)
{
- return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v64qi) _mm512_setzero_si512 (),
- (__mmask64) __M);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_multishift_epi64_epi8 (__m512i __X, __m512i __Y)
+_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
- (__v64qi) __Y,
- (__v64qi) _mm512_undefined_epi32 (),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M,
+ (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y),
+ (__v64qi)_mm512_setzero_si512());
}
diff --git a/lib/Headers/avx512vbmivlintrin.h b/lib/Headers/avx512vbmivlintrin.h
index 9a0400b2b5d5..b5d5aa9af523 100644
--- a/lib/Headers/avx512vbmivlintrin.h
+++ b/lib/Headers/avx512vbmivlintrin.h
@@ -150,61 +150,49 @@ _mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y)
+_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
- (__v16qi) __Y,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_vpmultishiftqb128((__v16qi)__X, (__v16qi)__Y);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
+_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X,
+ __m128i __Y)
{
- return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
- (__v16qi) __Y,
- (__v16qi)
- _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_multishift_epi64_epi8(__X, __Y),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
+_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
- (__v16qi) __Y,
- (__v16qi)
- _mm_undefined_si128 (),
- (__mmask16) -1);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_multishift_epi64_epi8(__X, __Y),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y)
+_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
- (__v32qi) __Y,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_vpmultishiftqb256((__v32qi)__X, (__v32qi)__Y);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
+_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X,
+ __m256i __Y)
{
- return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
- (__v32qi) __Y,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
+_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
- (__v32qi) __Y,
- (__v32qi)
- _mm256_undefined_si256 (),
- (__mmask32) -1);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y),
+ (__v32qi)_mm256_setzero_si256());
}
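
The rewritten masked multishift forms are equivalent to blending the unmasked result into the pass-through operand; a small sanity check (illustrative only, not part of the patch; assumes AVX512-VBMI, AVX512BW and AVX512VL, e.g. -mavx512vbmi -mavx512bw -mavx512vl):

#include <immintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m128i ctrl = _mm_set1_epi8(4);   /* per-byte bit-offset selectors */
  __m128i data = _mm_set_epi32(0x11223344, 0x55667788, 0x0a0b0c0d, 0x01020304);
  __m128i src  = _mm_setzero_si128();
  __mmask16 m  = 0x00FF;

  __m128i masked  = _mm_mask_multishift_epi64_epi8(src, m, ctrl, data);
  __m128i blended = _mm_mask_blend_epi8(m, src,
                                        _mm_multishift_epi64_epi8(ctrl, data));

  char x[16], y[16];
  _mm_storeu_si128((__m128i *)x, masked);
  _mm_storeu_si128((__m128i *)y, blended);
  puts(memcmp(x, y, 16) == 0 ? "equivalent" : "mismatch"); /* expect "equivalent" */
  return 0;
}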
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index 1b038dd04df6..87e0023e8b74 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -2297,6 +2297,15 @@ _mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
(__v32qi) _mm256_setzero_si256());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -2314,6 +2323,15 @@ _mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi16 (void const *__P)
+{
+ struct __loadu_epi16 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi16*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
{
@@ -2331,6 +2349,15 @@ _mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
{
@@ -2348,6 +2375,15 @@ _mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
(__mmask16) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi8 (void const *__P)
+{
+ struct __loadu_epi8 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi8*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
{
@@ -2364,7 +2400,17 @@ _mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
_mm256_setzero_si256 (),
(__mmask32) __U);
}
-static __inline__ void __DEFAULT_FN_ATTRS256
+
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi16 (void *__P, __m128i __A)
+{
+ struct __storeu_epi16 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
{
__builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
@@ -2372,6 +2418,15 @@ _mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi16 (void *__P, __m256i __A)
+{
+ struct __storeu_epi16 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi16*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
{
@@ -2380,6 +2435,15 @@ _mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
(__mmask16) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi8 (void *__P, __m128i __A)
+{
+ struct __storeu_epi8 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
{
@@ -2388,6 +2452,15 @@ _mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
(__mmask16) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi8 (void *__P, __m256i __A)
+{
+ struct __storeu_epi8 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi8*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
{
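
The new _mm_loadu_epi16/_mm_storeu_epi16 (and epi8) helpers give unaligned, alias-safe vector access without a mask; a short sketch (illustrative only, not part of the patch; assumes AVX512BW and AVX512VL, e.g. -mavx512bw -mavx512vl):

#include <immintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  char raw[sizeof(__m128i) + 1];            /* deliberately misaligned storage */
  short vals[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  memcpy(raw + 1, vals, sizeof(vals));

  __m128i v = _mm_loadu_epi16(raw + 1);     /* unaligned load, no fault */
  v = _mm_add_epi16(v, _mm_set1_epi16(10));
  _mm_storeu_epi16(raw + 1, v);             /* unaligned store back */

  memcpy(vals, raw + 1, sizeof(vals));
  printf("%d %d\n", vals[0], vals[7]);      /* expect 11 18 */
  return 0;
}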
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index 0ee1d00ef4d2..a2cdc0a96e59 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -462,10 +462,16 @@ _mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a & (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_and_si256(__A, __B),
+ (__v8si)_mm256_and_epi32(__A, __B),
(__v8si)__W);
}
@@ -476,10 +482,16 @@ _mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_and_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a & (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_and_si128(__A, __B),
+ (__v4si)_mm_and_epi32(__A, __B),
(__v4si)__W);
}
@@ -490,10 +502,16 @@ _mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_epi32(__m256i __A, __m256i __B)
+{
+ return (__m256i)(~(__v8su)__A & (__v8su)__B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_andnot_si256(__A, __B),
+ (__v8si)_mm256_andnot_epi32(__A, __B),
(__v8si)__W);
}
@@ -505,24 +523,36 @@ _mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_andnot_epi32(__m128i __A, __m128i __B)
+{
+ return (__m128i)(~(__v4su)__A & (__v4su)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_andnot_si128(__A, __B),
+ (__v4si)_mm_andnot_epi32(__A, __B),
(__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a | (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_or_si256(__A, __B),
+ (__v8si)_mm256_or_epi32(__A, __B),
(__v8si)__W);
}
@@ -533,10 +563,16 @@ _mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_or_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a | (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_or_si128(__A, __B),
+ (__v4si)_mm_or_epi32(__A, __B),
(__v4si)__W);
}
@@ -547,10 +583,16 @@ _mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_epi32(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v8su)__a ^ (__v8su)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_xor_si256(__A, __B),
+ (__v8si)_mm256_xor_epi32(__A, __B),
(__v8si)__W);
}
@@ -561,11 +603,16 @@ _mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_xor_epi32(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v4su)__a ^ (__v4su)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_xor_si128(__A, __B),
+ (__v4si)_mm_xor_epi32(__A, __B),
(__v4si)__W);
}
@@ -576,10 +623,16 @@ _mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_and_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a & (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_and_si256(__A, __B),
+ (__v4di)_mm256_and_epi64(__A, __B),
(__v4di)__W);
}
@@ -590,10 +643,16 @@ _mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_and_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a & (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_and_si128(__A, __B),
+ (__v2di)_mm_and_epi64(__A, __B),
(__v2di)__W);
}
@@ -604,10 +663,16 @@ _mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_andnot_epi64(__m256i __A, __m256i __B)
+{
+ return (__m256i)(~(__v4du)__A & (__v4du)__B);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_andnot_si256(__A, __B),
+ (__v4di)_mm256_andnot_epi64(__A, __B),
(__v4di)__W);
}
@@ -619,10 +684,16 @@ _mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_andnot_epi64(__m128i __A, __m128i __B)
+{
+ return (__m128i)(~(__v2du)__A & (__v2du)__B);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_andnot_si128(__A, __B),
+ (__v2di)_mm_andnot_epi64(__A, __B),
(__v2di)__W);
}
@@ -633,10 +704,16 @@ _mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_or_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a | (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_or_si256(__A, __B),
+ (__v4di)_mm256_or_epi64(__A, __B),
(__v4di)__W);
}
@@ -647,10 +724,16 @@ _mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_or_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a | (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_or_si128(__A, __B),
+ (__v2di)_mm_or_epi64(__A, __B),
(__v2di)__W);
}
@@ -661,10 +744,16 @@ _mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_xor_epi64(__m256i __a, __m256i __b)
+{
+ return (__m256i)((__v4du)__a ^ (__v4du)__b);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_xor_si256(__A, __B),
+ (__v4di)_mm256_xor_epi64(__A, __B),
(__v4di)__W);
}
@@ -675,11 +764,17 @@ _mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_xor_epi64(__m128i __a, __m128i __b)
+{
+ return (__m128i)((__v2du)__a ^ (__v2du)__b);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A,
__m128i __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_xor_si128(__A, __B),
+ (__v2di)_mm_xor_epi64(__A, __B),
(__v2di)__W);
}
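
The added _mm_and/or/xor_epi32 (and _epi64) wrappers provide the EVEX-named unmasked forms that the mask/maskz variants now build on; a quick sketch (illustrative only, not part of the patch; assumes AVX512F and AVX512VL, e.g. -mavx512f -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi32(0x0F0F0F0F);
  __m128i b = _mm_set1_epi32(0x00FF00FF);

  __m128i x  = _mm_xor_epi32(a, b);               /* unmasked: 0x0FF00FF0 per lane */
  __m128i xm = _mm_mask_xor_epi32(a, 0x5, a, b);  /* lanes 0,2 get a^b; 1,3 keep a */

  int out[4];
  _mm_storeu_si128((__m128i *)out, xm);
  printf("%08x %08x\n", out[0], out[1]);          /* expect 0ff00ff0 0f0f0f0f */
  (void)x;
  return 0;
}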
@@ -3389,162 +3484,162 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
}
#define _mm_i64scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
(__v2df)(__m128d)(v1), (int)(scale))
#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv2df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
(__v2df)(__m128d)(v1), (int)(scale))
#define _mm_i64scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
(__v2di)(__m128i)(v1), (int)(scale))
#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv2di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
(__v2di)(__m128i)(v1), (int)(scale))
#define _mm256_i64scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
(__v4df)(__m256d)(v1), (int)(scale))
#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv4df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
(__v4df)(__m256d)(v1), (int)(scale))
#define _mm256_i64scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
(__v4di)(__m256i)(v1), (int)(scale))
#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv4di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
(__v4di)(__m256i)(v1), (int)(scale))
#define _mm_i64scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv4sf((float *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm_i64scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \
(__v2di)(__m128i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv4si((int *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \
(__v2di)(__m128i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm256_i64scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv8sf((float *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm256_i64scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \
(__v4di)(__m256i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scatterdiv8si((int *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \
(__v4di)(__m256i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm_i32scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
(__v2df)(__m128d)(v1), (int)(scale))
#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv2df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), \
(__v2df)(__m128d)(v1), (int)(scale))
#define _mm_i32scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
(__v2di)(__m128i)(v1), (int)(scale))
#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv2di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), \
(__v2di)(__m128i)(v1), (int)(scale))
#define _mm256_i32scatter_pd(addr, index, v1, scale) \
- __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
(__v4df)(__m256d)(v1), (int)(scale))
#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv4df((double *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), \
(__v4df)(__m256d)(v1), (int)(scale))
#define _mm256_i32scatter_epi64(addr, index, v1, scale) \
- __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
(__v4di)(__m256i)(v1), (int)(scale))
#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv4di((long long *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), \
(__v4di)(__m256i)(v1), (int)(scale))
#define _mm_i32scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv4sf((float *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \
(int)(scale))
#define _mm_i32scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \
(__v4si)(__m128i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv4si((int *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \
(__v4si)(__m128i)(index), \
(__v4si)(__m128i)(v1), (int)(scale))
#define _mm256_i32scatter_ps(addr, index, v1, scale) \
- __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
(int)(scale))
#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv8sf((float *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \
(int)(scale))
#define _mm256_i32scatter_epi32(addr, index, v1, scale) \
- __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)-1, \
+ __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \
(__v8si)(__m256i)(index), \
(__v8si)(__m256i)(v1), (int)(scale))
#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
- __builtin_ia32_scattersiv8si((int *)(addr), (__mmask8)(mask), \
+ __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \
(__v8si)(__m256i)(index), \
(__v8si)(__m256i)(v1), (int)(scale))
@@ -4989,6 +5084,12 @@ _mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
(__v8si) _mm256_setzero_si256 ());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi32 (void const *__P)
+{
+ return *(__m128i *) __P;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5008,6 +5109,12 @@ _mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi32 (void const *__P)
+{
+ return *(__m256i *) __P;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5027,6 +5134,12 @@ _mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
__U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi32 (void *__P, __m128i __A)
+{
+ *(__m128i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5035,6 +5148,12 @@ _mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi32 (void *__P, __m256i __A)
+{
+ *(__m256i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5075,6 +5194,12 @@ _mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
(__v4di) _mm256_setzero_si256 ());
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_load_epi64 (void const *__P)
+{
+ return *(__m128i *) __P;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5094,6 +5219,12 @@ _mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
__U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_load_epi64 (void const *__P)
+{
+ return *(__m256i *) __P;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5113,6 +5244,12 @@ _mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
__U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_store_epi64 (void *__P, __m128i __A)
+{
+ *(__m128i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5121,6 +5258,12 @@ _mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_store_epi64 (void *__P, __m256i __A)
+{
+ *(__m256i *) __P = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5366,6 +5509,15 @@ _mm256_maskz_load_ps (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5383,6 +5535,15 @@ _mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi64 (void const *__P)
+{
+ struct __loadu_epi64 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi64*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5400,6 +5561,15 @@ _mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m128i __DEFAULT_FN_ATTRS128
+_mm_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
{
@@ -5417,6 +5587,15 @@ _mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P)
(__mmask8) __U);
}
+static __inline __m256i __DEFAULT_FN_ATTRS256
+_mm256_loadu_epi32 (void const *__P)
+{
+ struct __loadu_epi32 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_epi32*)__P)->__v;
+}
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P)
{
@@ -5534,6 +5713,15 @@ _mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi64 (void *__P, __m128i __A)
+{
+ struct __storeu_epi64 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5542,6 +5730,15 @@ _mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi64 (void *__P, __m256i __A)
+{
+ struct __storeu_epi64 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi64*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -5550,6 +5747,15 @@ _mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS128
+_mm_storeu_epi32 (void *__P, __m128i __A)
+{
+ struct __storeu_epi32 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
{
@@ -5558,6 +5764,15 @@ _mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A)
(__mmask8) __U);
}
+static __inline void __DEFAULT_FN_ATTRS256
+_mm256_storeu_epi32 (void *__P, __m256i __A)
+{
+ struct __storeu_epi32 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_epi32*)__P)->__v = __A;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A)
{
@@ -7769,97 +7984,97 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v2di)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v2di)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
(__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v4di)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v4di)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v2di)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v2di)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v4di)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v4di)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
(__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
- (double const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
- (long long const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v4si)(__m128i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
(__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
- (float const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
(__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
- (int const *)(addr), \
+ (void const *)(addr), \
(__v8si)(__m256i)(index), \
(__mmask8)(mask), (int)(scale))
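
With the aligned _mm256_load/store_epi32 and unaligned _mm256_loadu/storeu_epi32 variants both present, the usual pairing looks like this (illustrative only, not part of the patch; assumes AVX512F and AVX512VL plus a C11 compiler for _Alignas):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  _Alignas(32) int aligned_buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int plain_buf[9];                              /* plain_buf + 1 is not guaranteed 32-byte aligned */

  __m256i v = _mm256_load_epi32(aligned_buf);    /* requires 32-byte alignment */
  _mm256_storeu_epi32(plain_buf + 1, v);         /* no alignment requirement */

  __m256i u = _mm256_loadu_epi32(plain_buf + 1);
  _mm256_store_epi32(aligned_buf, u);            /* aligned store back */

  printf("%d %d\n", aligned_buf[0], aligned_buf[7]); /* expect 1 8 */
  return 0;
}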
diff --git a/lib/Headers/avx512vlvbmi2intrin.h b/lib/Headers/avx512vlvbmi2intrin.h
index baaf5654631c..632d14fb55aa 100644
--- a/lib/Headers/avx512vlvbmi2intrin.h
+++ b/lib/Headers/avx512vlvbmi2intrin.h
@@ -421,327 +421,279 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__v8hi)_mm_setzero_si128())
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshldvq256((__v4di)__A, (__v4di)__B,
+ (__v4di)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvq256_maskz ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_shldv_epi64(__A, __B, __C),
+ (__v4di)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvq256_mask ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_shldv_epi64(__A, __B, __C),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshldvq128((__v2di)__A, (__v2di)__B,
+ (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvq128_maskz ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_shldv_epi64(__A, __B, __C),
+ (__v2di)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_shldv_epi64(__A, __B, __C),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shldv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshldvd256((__v8si)__A, (__v8si)__B,
+ (__v8si)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shldv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvd256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_shldv_epi32(__A, __B, __C),
+ (__v8si)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_shldv_epi32(__A, __B, __C),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshldvd128((__v4si)__A, (__v4si)__B,
+ (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvd128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_shldv_epi32(__A, __B, __C),
+ (__v4si)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_shldv_epi32(__A, __B, __C),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shldv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
+_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshldvw256((__v16hi)__A, (__v16hi)__B,
+ (__v16hi)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shldv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvw256_maskz ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_shldv_epi16(__A, __B, __C),
+ (__v16hi)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshldvw256_mask ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_shldv_epi16(__A, __B, __C),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshldvw128((__v8hi)__A, (__v8hi)__B,
+ (__v8hi)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvw128_maskz ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_shldv_epi16(__A, __B, __C),
+ (__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_shldv_epi16(__A, __B, __C),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shrdv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshrdvq256((__v4di)__A, (__v4di)__B,
+ (__v4di)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvq256_maskz ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_shrdv_epi64(__A, __B, __C),
+ (__v4di)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvq256_mask ((__v4di) __S,
- (__v4di) __A,
- (__v4di) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectq_256(__U,
+ (__v4di)_mm256_shrdv_epi64(__A, __B, __C),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshrdvq128((__v2di)__A, (__v2di)__B,
+ (__v2di)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvq128_maskz ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_shrdv_epi64(__A, __B, __C),
+ (__v2di)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
- (__v2di) __A,
- (__v2di) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectq_128(__U,
+ (__v2di)_mm_shrdv_epi64(__A, __B, __C),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shrdv_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
+_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshrdvd256((__v8si)__A, (__v8si)__B,
+ (__v8si)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvd256_maskz ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_shrdv_epi32(__A, __B, __C),
+ (__v8si)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvd256_mask ((__v8si) __S,
- (__v8si) __A,
- (__v8si) __B,
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_selectd_256(__U,
+ (__v8si)_mm256_shrdv_epi32(__A, __B, __C),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshrdvd128((__v4si)__A, (__v4si)__B,
+ (__v4si)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvd128_maskz ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_shrdv_epi32(__A, __B, __C),
+ (__v4si)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
- (__v4si) __A,
- (__v4si) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectd_128(__U,
+ (__v4si)_mm_shrdv_epi32(__A, __B, __C),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_shrdv_epi16(__m256i __S, __mmask16 __U, __m256i __A, __m256i __B)
+_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- __U);
+ return (__m256i)__builtin_ia32_vpshrdvw256((__v16hi)__A, (__v16hi)__B,
+ (__v16hi)__C);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __S, __m256i __A, __m256i __B)
+_mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvw256_maskz ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- __U);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_shrdv_epi16(__A, __B, __C),
+ (__v16hi)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B)
+_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C)
{
- return (__m256i) __builtin_ia32_vpshrdvw256_mask ((__v16hi) __S,
- (__v16hi) __A,
- (__v16hi) __B,
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_selectw_256(__U,
+ (__v16hi)_mm256_shrdv_epi16(__A, __B, __C),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- __U);
+ return (__m128i)__builtin_ia32_vpshrdvw128((__v8hi)__A, (__v8hi)__B,
+ (__v8hi)__C);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvw128_maskz ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- __U);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_shrdv_epi16(__A, __B, __C),
+ (__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C)
{
- return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
- (__v8hi) __A,
- (__v8hi) __B,
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_selectw_128(__U,
+ (__v8hi)_mm_shrdv_epi16(__A, __B, __C),
+ (__v8hi)_mm_setzero_si128());
}
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index d03bef442a28..56c20b78d340 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -62,7 +62,7 @@
static __inline__ unsigned short __RELAXED_FN_ATTRS
__tzcnt_u16(unsigned short __X)
{
- return __X ? __builtin_ctzs(__X) : 16;
+ return __builtin_ia32_tzcnt_u16(__X);
}
/// Performs a bitwise AND of the second operand with the one's
@@ -196,7 +196,7 @@ __blsr_u32(unsigned int __X)
static __inline__ unsigned int __RELAXED_FN_ATTRS
__tzcnt_u32(unsigned int __X)
{
- return __X ? __builtin_ctz(__X) : 32;
+ return __builtin_ia32_tzcnt_u32(__X);
}
/// Counts the number of trailing zero bits in the operand.
@@ -212,7 +212,7 @@ __tzcnt_u32(unsigned int __X)
static __inline__ int __RELAXED_FN_ATTRS
_mm_tzcnt_32(unsigned int __X)
{
- return __X ? __builtin_ctz(__X) : 32;
+ return __builtin_ia32_tzcnt_u32(__X);
}
#ifdef __x86_64__
@@ -359,7 +359,7 @@ __blsr_u64(unsigned long long __X)
static __inline__ unsigned long long __RELAXED_FN_ATTRS
__tzcnt_u64(unsigned long long __X)
{
- return __X ? __builtin_ctzll(__X) : 64;
+ return __builtin_ia32_tzcnt_u64(__X);
}
/// Counts the number of trailing zero bits in the operand.
@@ -375,7 +375,7 @@ __tzcnt_u64(unsigned long long __X)
static __inline__ long long __RELAXED_FN_ATTRS
_mm_tzcnt_64(unsigned long long __X)
{
- return __X ? __builtin_ctzll(__X) : 64;
+ return __builtin_ia32_tzcnt_u64(__X);
}
#endif /* __x86_64__ */
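
Routing the tzcnt intrinsics through the dedicated builtin keeps the zero-input case well defined (the result is the operand width); a tiny check (illustrative only, not part of the patch; assumes a GCC-compatible x86 toolchain):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  printf("%u\n", __tzcnt_u32(0u));        /* expect 32: zero input is defined */
  printf("%u\n", __tzcnt_u32(0x58u));     /* expect 3: lowest set bit is bit 3 */
  printf("%d\n", _mm_tzcnt_32(1u << 31)); /* expect 31 */
  return 0;
}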
diff --git a/lib/Headers/cuda_wrappers/new b/lib/Headers/cuda_wrappers/new
index 71b7a52363ce..f49811c5a57c 100644
--- a/lib/Headers/cuda_wrappers/new
+++ b/lib/Headers/cuda_wrappers/new
@@ -73,10 +73,12 @@ __device__ inline void operator delete[](void *ptr,
// Sized delete, C++14 only.
#if __cplusplus >= 201402L
-__device__ void operator delete(void *ptr, __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+__device__ inline void operator delete(void *ptr,
+ __SIZE_TYPE__ size) CUDA_NOEXCEPT {
::operator delete(ptr);
}
-__device__ void operator delete[](void *ptr, __SIZE_TYPE__ size) CUDA_NOEXCEPT {
+__device__ inline void operator delete[](void *ptr,
+ __SIZE_TYPE__ size) CUDA_NOEXCEPT {
::operator delete(ptr);
}
#endif
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index f0ea7cd05c63..6d61f9719944 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -1675,7 +1675,49 @@ _mm_loadu_si64(void const *__a)
long long __v;
} __attribute__((__packed__, __may_alias__));
long long __u = ((struct __loadu_si64*)__a)->__v;
- return __extension__ (__m128i)(__v2di){__u, 0L};
+ return __extension__ (__m128i)(__v2di){__u, 0LL};
+}
+
+/// Loads a 32-bit integer value to the low element of a 128-bit integer
+/// vector and clears the upper elements.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+///
+/// \param __a
+/// A pointer to a 32-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si32(void const *__a)
+{
+ struct __loadu_si32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ int __u = ((struct __loadu_si32*)__a)->__v;
+ return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+}
+
+/// Loads a 16-bit integer value to the low element of a 128-bit integer
+/// vector and clears the upper elements.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __a
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_loadu_si16(void const *__a)
+{
+ struct __loadu_si16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ short __u = ((struct __loadu_si16*)__a)->__v;
+ return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
}
/// Loads a 64-bit double-precision value to the low element of a
@@ -3993,6 +4035,69 @@ _mm_storeu_si128(__m128i *__p, __m128i __b)
((struct __storeu_si128*)__p)->__v = __b;
}
+/// Stores a 64-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location. The address of the memory
+///    location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si64(void *__p, __m128i __b)
+{
+ struct __storeu_si64 {
+ long long __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+}
+
+/// Stores a 32-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si32(void *__p, __m128i __b)
+{
+ struct __storeu_si32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+}
+
+/// Stores a 16-bit integer value from the low element of a 128-bit integer
+/// vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic does not correspond to a specific instruction.
+///
+/// \param __p
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __b
+/// A 128-bit integer vector containing the value to be stored.
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_storeu_si16(void *__p, __m128i __b)
+{
+ struct __storeu_si16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+}
+
/// Moves bytes selected by the mask from the first operand to the
/// specified unaligned memory location. When a mask bit is 1, the
/// corresponding byte is written, otherwise it is not written.
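
The new narrow unaligned load/store intrinsics round-trip scalars through vector registers without alignment or strict-aliasing concerns; a short sketch (illustrative only, not part of the patch; needs only SSE2, so any x86-64 target works):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  unsigned char bytes[8] = {0x34, 0x12, 0xcd, 0xab, 0, 0, 0, 0};

  __m128i lo16 = _mm_loadu_si16(bytes);   /* low i16 lane = 0x1234, rest zeroed */
  __m128i lo32 = _mm_loadu_si32(bytes);   /* low i32 lane = 0xabcd1234, rest zeroed */

  int val32;
  short val16;
  _mm_storeu_si32(&val32, lo32);          /* store the low 32 bits back out */
  _mm_storeu_si16(&val16, lo16);

  printf("%x %hx\n", val32, val16);       /* expect abcd1234 1234 */
  return 0;
}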
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index 44d4d05494f5..56215cd624d7 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -21,8 +21,8 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __FLOAT_H
-#define __FLOAT_H
+#ifndef __CLANG_FLOAT_H
+#define __CLANG_FLOAT_H
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
@@ -85,6 +85,9 @@
# undef FLT_DECIMAL_DIG
# undef DBL_DECIMAL_DIG
# undef LDBL_DECIMAL_DIG
+# undef FLT_HAS_SUBNORM
+# undef DBL_HAS_SUBNORM
+# undef LDBL_HAS_SUBNORM
# endif
#endif
@@ -141,6 +144,9 @@
# define FLT_DECIMAL_DIG __FLT_DECIMAL_DIG__
# define DBL_DECIMAL_DIG __DBL_DECIMAL_DIG__
# define LDBL_DECIMAL_DIG __LDBL_DECIMAL_DIG__
+# define FLT_HAS_SUBNORM __FLT_HAS_DENORM__
+# define DBL_HAS_SUBNORM __DBL_HAS_DENORM__
+# define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__
#endif
#ifdef __STDC_WANT_IEC_60559_TYPES_EXT__
@@ -157,4 +163,4 @@
# define FLT16_TRUE_MIN __FLT16_TRUE_MIN__
#endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */
-#endif /* __FLOAT_H */
+#endif /* __CLANG_FLOAT_H */
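A quick sketch of how the newly exposed C11 macros can be used, assuming a C11 (or later) translation unit where float.h defines them:

    #include <float.h>
    #include <stdio.h>

    int main(void) {
    #ifdef FLT_HAS_SUBNORM
      /* 1: subnormals supported, 0: absent, -1: indeterminable (C11 5.2.4.2.2). */
      printf("float subnormal support: %d\n", FLT_HAS_SUBNORM);
    #endif
      return 0;
    }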
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index e7bfbf964d56..7d0722ec7652 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -306,6 +306,65 @@ _writegsbase_u64(unsigned long long __V)
#endif
#endif /* __FSGSBASE__ */
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
+
+/* The structs used below are to force the load/store to be unaligned. This
+ * is accomplished with the __packed__ attribute. The __may_alias__ prevents
+ * TBAA metadata from being generated based on the struct and the type of the
+ * field inside of it.
+ */
+
+static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i16(void const * __P) {
+ struct __loadu_i16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ return __builtin_bswap16(((struct __loadu_i16*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i16(void * __P, short __D) {
+ struct __storeu_i16 {
+ short __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i32(void const * __P) {
+ struct __loadu_i32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ return __builtin_bswap32(((struct __loadu_i32*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i32(void * __P, int __D) {
+ struct __storeu_i32 {
+ int __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_loadbe_i64(void const * __P) {
+ struct __loadu_i64 {
+ long long __v;
+ } __attribute__((__packed__, __may_alias__));
+ return __builtin_bswap64(((struct __loadu_i64*)__P)->__v);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
+_storebe_i64(void * __P, long long __D) {
+ struct __storeu_i64 {
+ long long __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+}
+#endif
+#endif /* __MOVBE__ */
+
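A usage sketch for the MOVBE helpers, assuming the translation unit is built with -mmovbe (or an equivalent target attribute) so the intrinsics above are available; read_be32/write_be32 are hypothetical wrapper names:

    #include <immintrin.h>

    /* Read and write a 32-bit big-endian field, e.g. in a network header. */
    static int  read_be32(const void *p)    { return _loadbe_i32(p); }
    static void write_be32(void *p, int v)  { _storebe_i32(p, v); }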
#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
#include <rtmintrin.h>
#include <xtestintrin.h>
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index 91914214e299..c86f41faeb88 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -90,8 +90,6 @@ void __inwordstring(unsigned short, unsigned short *, unsigned long);
void __lidt(void *);
unsigned __int64 __ll_lshift(unsigned __int64, int);
__int64 __ll_rshift(__int64, int);
-unsigned int __lzcnt(unsigned int);
-unsigned short __lzcnt16(unsigned short);
static __inline__
void __movsb(unsigned char *, unsigned char const *, size_t);
static __inline__
@@ -219,7 +217,6 @@ void __incgsbyte(unsigned long);
void __incgsdword(unsigned long);
void __incgsqword(unsigned long);
void __incgsword(unsigned long);
-unsigned __int64 __lzcnt64(unsigned __int64);
static __inline__
void __movsq(unsigned long long *, unsigned long long const *, size_t);
static __inline__
@@ -329,189 +326,63 @@ __int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
-}
+char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
+char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
+short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
+short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
+long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
+long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
+__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
+__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Increment
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_acq(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_nf(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16_rel(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_acq(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_nf(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedIncrement_rel(long volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_acq(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_nf(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64_rel(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
+short _InterlockedIncrement16_acq(short volatile *_Value);
+short _InterlockedIncrement16_nf(short volatile *_Value);
+short _InterlockedIncrement16_rel(short volatile *_Value);
+long _InterlockedIncrement_acq(long volatile *_Value);
+long _InterlockedIncrement_nf(long volatile *_Value);
+long _InterlockedIncrement_rel(long volatile *_Value);
+__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Decrement
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_acq(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_nf(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16_rel(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_acq(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_nf(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedDecrement_rel(long volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_acq(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_nf(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64_rel(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
-}
+short _InterlockedDecrement16_acq(short volatile *_Value);
+short _InterlockedDecrement16_nf(short volatile *_Value);
+short _InterlockedDecrement16_rel(short volatile *_Value);
+long _InterlockedDecrement_acq(long volatile *_Value);
+long _InterlockedDecrement_nf(long volatile *_Value);
+long _InterlockedDecrement_rel(long volatile *_Value);
+__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
+__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked And
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
+char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
+short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
+short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
+long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
+long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
+long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
@@ -534,261 +405,81 @@ unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
|* Interlocked Or
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
+char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
+char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
+short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
+short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
+short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
+long _InterlockedOr_acq(long volatile *_Value, long _Mask);
+long _InterlockedOr_nf(long volatile *_Value, long _Mask);
+long _InterlockedOr_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_acq(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_nf(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8_rel(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_acq(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_nf(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16_rel(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_acq(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_nf(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor_rel(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
-}
+char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
+char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
+char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
+short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
+short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
+short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
+long _InterlockedXor_acq(long volatile *_Value, long _Mask);
+long _InterlockedXor_nf(long volatile *_Value, long _Mask);
+long _InterlockedXor_rel(long volatile *_Value, long _Mask);
+__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
+__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_acq(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_nf(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8_rel(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_acq(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_nf(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16_rel(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_acq(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_nf(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_rel(long volatile *_Target, long _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
- return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
- return _Value;
-}
+char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
+char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
+char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
+short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
+short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
+short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
+long _InterlockedExchange_acq(long volatile *_Target, long _Value);
+long _InterlockedExchange_nf(long volatile *_Target, long _Value);
+long _InterlockedExchange_rel(long volatile *_Target, long _Value);
+__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
+__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_acq(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_nf(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_rel(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_acq(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_nf(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_rel(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_acq(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_nf(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_rel(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
+char _InterlockedCompareExchange8_acq(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_nf(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_rel(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+short _InterlockedCompareExchange16_acq(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_nf(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_rel(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+long _InterlockedCompareExchange_acq(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_nf(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_rel(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
#endif
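These _Interlocked* names are now plain declarations, which the compiler is expected to lower as builtins in MS-compatible ARM/AArch64 builds. A minimal usage sketch; try_lock is a hypothetical helper:

    #include <intrin.h>

    /* Attempt to take a spinlock flag with acquire semantics; the intrinsic
     * returns the previous value, so 0 means the lock was acquired. */
    static int try_lock(long volatile *flag) {
      return _InterlockedCompareExchange_acq(flag, 1, 0) == 0;
    }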
/*----------------------------------------------------------------------------*\
@@ -841,7 +532,7 @@ __stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
static __inline__ void __DEFAULT_FN_ATTRS
__cpuid(int __info[4], int __level) {
__asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level));
+ : "a"(__level), "c"(0));
}
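With the added "c"(0) constraint, __cpuid now behaves like __cpuidex(info, leaf, 0) for leaves that read ECX as a subleaf index. A sketch, assuming an x86 MS-compatible build:

    #include <intrin.h>
    #include <stdio.h>

    int main(void) {
      int info[4];
      __cpuid(info, 7);   /* leaf 7 consumes ECX; the subleaf is now a defined 0 */
      printf("leaf 7 EBX = %08x\n", (unsigned)info[1]);
      return 0;
    }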
static __inline__ void __DEFAULT_FN_ATTRS
__cpuidex(int __info[4], int __level, int __ecx) {
@@ -858,23 +549,32 @@ static __inline__ void __DEFAULT_FN_ATTRS
__halt(void) {
__asm__ volatile ("hlt");
}
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
static __inline__ void __DEFAULT_FN_ATTRS
__nop(void) {
__asm__ volatile ("nop");
}
#endif
-#if defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__shiftleft128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
- unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
- unsigned __int128 __res = __val << (__d & 63);
- return (unsigned __int64)(__res >> 64);
+
+/*----------------------------------------------------------------------------*\
+|* MS AArch64 specific
+\*----------------------------------------------------------------------------*/
+#if defined(__aarch64__)
+unsigned __int64 __getReg(int);
+long _InterlockedAdd(long volatile *Addend, long Value);
+int _ReadStatusReg(int);
+void _WriteStatusReg(int, int);
+
+static inline unsigned short _byteswap_ushort (unsigned short val) {
+ return __builtin_bswap16(val);
}
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__shiftright128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
- unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
- unsigned __int128 __res = __val >> (__d & 63);
- return (unsigned __int64)__res;
+static inline unsigned long _byteswap_ulong (unsigned long val) {
+ return __builtin_bswap32(val);
+}
+static inline unsigned __int64 _byteswap_uint64 (unsigned __int64 val) {
+ return __builtin_bswap64(val);
}
#endif
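A small sketch of the new AArch64 byte-swap helpers (MS-compatible build assumed); they simply wrap the __builtin_bswap* builtins:

    #include <intrin.h>

    /* 0x11223344 -> 0x44332211 */
    static unsigned long to_opposite_endian32(unsigned long v) {
      return _byteswap_ulong(v);
    }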
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 558f1828f0e7..35c1651cc4a8 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -31,6 +31,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
@@ -41,11 +42,8 @@
/// An unsigned 16-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 16-bit integer containing the number of leading zero
/// bits in the operand.
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-__lzcnt16(unsigned short __X)
-{
- return __X ? __builtin_clzs(__X) : 16;
-}
+#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X))
+#endif // _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
@@ -61,7 +59,7 @@ __lzcnt16(unsigned short __X)
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__lzcnt32(unsigned int __X)
{
- return __X ? __builtin_clz(__X) : 32;
+ return __builtin_ia32_lzcnt_u32(__X);
}
/// Counts the number of leading zero bits in the operand.
@@ -78,10 +76,11 @@ __lzcnt32(unsigned int __X)
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_lzcnt_u32(unsigned int __X)
{
- return __X ? __builtin_clz(__X) : 32;
+ return __builtin_ia32_lzcnt_u32(__X);
}
#ifdef __x86_64__
+#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
@@ -93,11 +92,8 @@ _lzcnt_u32(unsigned int __X)
/// \returns An unsigned 64-bit integer containing the number of leading zero
/// bits in the operand.
/// \see _lzcnt_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__lzcnt64(unsigned long long __X)
-{
- return __X ? __builtin_clzll(__X) : 64;
-}
+#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X))
+#endif // _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
@@ -113,7 +109,7 @@ __lzcnt64(unsigned long long __X)
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_lzcnt_u64(unsigned long long __X)
{
- return __X ? __builtin_clzll(__X) : 64;
+ return __builtin_ia32_lzcnt_u64(__X);
}
#endif
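With the __builtin_ia32_lzcnt_* forms above, a zero input is well defined (_lzcnt_u32(0) == 32). A usage sketch, assuming the file is compiled with -mlzcnt; bit_width is a hypothetical helper:

    #include <immintrin.h>

    /* Number of significant bits in x; 0 for x == 0. */
    static unsigned int bit_width(unsigned int x) {
      return 32 - _lzcnt_u32(x);
    }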
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index e481c792df71..160bae807174 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -22,6 +22,14 @@
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+#ifndef cl_intel_planar_yuv
+#define cl_intel_planar_yuv
+#endif // cl_intel_planar_yuv
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
+#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+
#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
@@ -14462,7 +14470,7 @@ half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-int printf(__constant const char* st, ...);
+int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
#endif
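With the format attribute added above, clang can now diagnose mismatched printf arguments in OpenCL C 1.2+ kernels. A minimal kernel sketch; identifiers are illustrative:

    kernel void log_gid(void) {
      /* OK: %u matches the uint argument; passing a float here would now
       * trigger a -Wformat warning. */
      printf("gid0 = %u\n", (uint)get_global_id(0));
    }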
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -14602,6 +14610,7 @@ int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, f
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
@@ -14609,6 +14618,7 @@ int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_
int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
@@ -14618,6 +14628,7 @@ int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, f
uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
@@ -14625,6 +14636,7 @@ int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_
int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
#ifdef cl_khr_depth_images
float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
@@ -14727,6 +14739,8 @@ uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler,
#endif //cl_khr_mipmap_image
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+
/**
* Sampler-less Image Access
*/
@@ -14760,24 +14774,31 @@ float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+
// Image read functions returning half4 type
#ifdef cl_khr_fp16
half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
+half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
+/**
+ * Sampler-less Image Access
+ */
half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_1_2
#endif //cl_khr_fp16
// Image read functions for read_write images
@@ -15707,7 +15728,6 @@ double __ovld __conv work_group_scan_inclusive_max(double x);
// OpenCL v2.0 s6.13.16 - Pipe Functions
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-#define PIPE_RESERVE_ID_VALID_BIT (1U << 30)
#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -16193,6 +16213,637 @@ void __ovld __conv intel_sub_group_block_write_us4( __global ushort* p, u
void __ovld __conv intel_sub_group_block_write_us8( __global ushort* p, ushort8 data );
#endif // cl_intel_subgroups_short
+#ifdef cl_intel_device_side_avc_motion_estimation
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
+
+#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
+#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
+
+#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
+#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
+#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
+#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
+
+#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
+#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
+#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
+#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
+#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
+#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
+#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
+
+#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
+#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
+#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
+
+#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
+#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
+#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
+#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
+#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
+#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
+#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
+
+#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
+#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
+
+#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
+#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
+#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
+
+#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
+#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
+#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
+#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
+
+#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
+#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
+#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
+#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
+#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
+
+#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
+#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
+#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
+#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
+
+#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
+#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
+#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
+
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
+
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
+
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
+
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
+
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
+
+#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
+#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
+#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
+
+#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
+#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
+
+#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+
+// MCE built-in functions
+uchar __ovld
+intel_sub_group_avc_mce_get_default_inter_base_multi_reference_penalty(
+ uchar slice_type, uchar qp);
+ulong __ovld intel_sub_group_avc_mce_get_default_inter_shape_penalty(
+ uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_inter_direction_penalty(
+ uchar slice_type, uchar qp);
+uint __ovld intel_sub_group_avc_mce_get_default_intra_luma_shape_penalty(
+ uchar slice_type, uchar qp);
+uint2 __ovld
+intel_sub_group_avc_mce_get_default_inter_motion_vector_cost_table(
+ uchar slice_type, uchar qp);
+uchar __ovld intel_sub_group_avc_mce_get_default_intra_luma_mode_penalty(
+ uchar slice_type, uchar qp);
+
+uint2 __ovld intel_sub_group_avc_mce_get_default_high_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_medium_penalty_cost_table();
+uint2 __ovld intel_sub_group_avc_mce_get_default_low_penalty_cost_table();
+uint __ovld intel_sub_group_avc_mce_get_default_non_dc_luma_intra_penalty();
+uchar __ovld
+intel_sub_group_avc_mce_get_default_intra_chroma_mode_base_penalty();
+
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_shape_penalty(
+ ulong packed_shape_penalty, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_ac_only_haar(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_mce_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_mce_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_mce_get_motion_vectors(
+ intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_inter_distortions(
+ intel_sub_group_avc_mce_result_t result);
+ushort __ovld intel_sub_group_avc_mce_get_best_inter_distortion(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_major_shape(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_minor_shapes(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_directions(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld intel_sub_group_avc_mce_get_inter_motion_vector_count(
+ intel_sub_group_avc_mce_result_t result);
+uint __ovld intel_sub_group_avc_mce_get_inter_reference_ids(
+ intel_sub_group_avc_mce_result_t result);
+uchar __ovld
+intel_sub_group_avc_mce_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_mce_result_t result);
+
+// IME built-in functions
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_initialize(
+ ushort2 src_coord, uchar partition_mask, uchar sad_adjustment);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference(
+ short2 ref_offset, uchar search_window_config,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference(
+ short2 fwd_ref_offset, short2 bwd_ref_offset, uchar search_window_config,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_max_motion_vector_count(
+ uchar max_motion_vector_count, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_unidirectional_mix_disable(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_early_search_termination_threshold(
+ uchar threshold, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_weighted_sad(
+ uint packed_sad_weights, intel_sub_group_avc_ime_payload_t payload);
+
+__attribute__((deprecated("If you use the latest Intel driver, please use "
+ "intel_sub_group_avc_ime_ref_window_size instead",
+ "intel_sub_group_avc_ime_ref_window_size")))
+ushort2 __ovld
+intel_sub_group_ime_ref_window_size(uchar search_window_config, char dual_ref);
+ushort2 __ovld intel_sub_group_avc_ime_ref_window_size(
+ uchar search_window_config, char dual_ref);
+short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
+ short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
+ ushort2 image_size);
+
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamout(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamout(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streamin(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streamin(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_single_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_single_reference_streaminout(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_single_reference_streamin_t streamin_components);
+intel_sub_group_avc_ime_result_dual_reference_streamout_t __ovld
+intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ime_payload_t payload,
+ intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+
+intel_sub_group_avc_ime_single_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_single_reference_streamin(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_dual_reference_streamin_t __ovld
+intel_sub_group_avc_ime_get_dual_reference_streamin(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_single_reference_streamout(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_ime_strip_dual_reference_streamout(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+ intel_sub_group_avc_ime_result_single_reference_streamout_t result,
+ uchar major_shape);
+uint __ovld intel_sub_group_avc_ime_get_streamout_major_shape_motion_vectors(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+ushort __ovld intel_sub_group_avc_ime_get_streamout_major_shape_distortions(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+uchar __ovld intel_sub_group_avc_ime_get_streamout_major_shape_reference_ids(
+ intel_sub_group_avc_ime_result_dual_reference_streamout_t result,
+ uchar major_shape, uchar direction);
+
+uchar __ovld intel_sub_group_avc_ime_get_border_reached(
+ uchar image_select, intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_truncated_search_indication(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ime_get_unidirectional_early_search_termination(
+ intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_motion_vector(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ime_get_weighting_pattern_minimum_distortion(
+ intel_sub_group_avc_ime_result_t result);
+
+// REF built-in functions
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_fme_initialize(
+ ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+ uchar minor_shapes, uchar directions, uchar pixel_resolution,
+ uchar sad_adjustment);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_bme_initialize(
+ ushort2 src_coord, ulong motion_vectors, uchar major_shapes,
+ uchar minor_shapes, uchar directions, uchar pixel_resolution,
+ uchar bidirectional_weight, uchar sad_adjustment);
+
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bidirectional_mix_disable(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_bilinear_filter_enable(
+ intel_sub_group_avc_ref_payload_t payload);
+
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ sampler_t vme_media_sampler, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_ref_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+ intel_sub_group_avc_ref_payload_t payload);
+
+// SIC built-in functions
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_initialize(
+ ushort2 src_coord);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_skc(
+ uint skip_block_partition_type, uint skip_motion_vector_mask,
+ ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+ uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+ uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+ uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+ uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
+ uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
+ ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
+ ushort upper_edge_chroma_pixels, uchar intra_sad_adjustment,
+ intel_sub_group_avc_sic_payload_t payload);
+uint __ovld
+intel_sub_group_avc_sic_get_motion_vector_mask(
+ uint skip_block_partition_type, uchar direction);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_shape_penalty(
+ uint packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_luma_mode_cost_function(
+ uchar luma_mode_penalty, uint luma_packed_neighbor_modes,
+ uint luma_packed_non_dc_penalty, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_intra_chroma_mode_cost_function(
+ uchar chroma_mode_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_bilinear_filter_enable(
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_skc_forward_transform_enable(
+ ulong packed_sad_coefficients, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
+ uchar block_based_skip_type,
+ intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_ipe(
+ read_only image2d_t src_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_single_reference(
+ read_only image2d_t src_image, read_only image2d_t ref_image,
+ sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_dual_reference(
+ read_only image2d_t src_image, read_only image2d_t fwd_ref_image,
+ read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ sampler_t vme_media_sampler, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_sic_evaluate_with_multi_reference(
+ read_only image2d_t src_image, uint packed_reference_ids,
+ uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
+ intel_sub_group_avc_sic_payload_t payload);
+
+uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_luma_distortion(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_best_ipe_chroma_distortion(
+ intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_ipe_luma_modes(
+ intel_sub_group_avc_sic_result_t result);
+uchar __ovld intel_sub_group_avc_sic_get_ipe_chroma_mode(
+ intel_sub_group_avc_sic_result_t result);
+uint __ovld intel_sub_group_avc_sic_get_packed_skc_luma_count_threshold(
+ intel_sub_group_avc_sic_result_t result);
+ulong __ovld intel_sub_group_avc_sic_get_packed_skc_luma_sum_threshold(
+ intel_sub_group_avc_sic_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_raw_sads(
+ intel_sub_group_avc_sic_result_t result);
+
+// Wrappers
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_base_multi_reference_penalty(
+ uchar reference_base_penalty, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_shape_penalty(
+ ulong packed_shape_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_inter_direction_penalty(
+ uchar direction_cost, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_motion_vector_cost_function(
+ ulong packed_cost_center_delta, uint2 packed_cost_table,
+ uchar cost_precision, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_source_interlaced_field_polarity(
+ uchar src_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_single_reference_interlaced_field_polarity(
+ uchar ref_field_polarity, intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_dual_reference_interlaced_field_polarities(
+ uchar fwd_ref_field_polarity, uchar bwd_ref_field_polarity,
+ intel_sub_group_avc_sic_payload_t payload);
+
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_ime_set_ac_only_haar(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_ref_set_ac_only_haar(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_sic_set_ac_only_haar(
+ intel_sub_group_avc_sic_payload_t payload);
+
+ulong __ovld intel_sub_group_avc_ime_get_motion_vectors(
+ intel_sub_group_avc_ime_result_t result);
+ulong __ovld intel_sub_group_avc_ref_get_motion_vectors(
+ intel_sub_group_avc_ref_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_inter_distortions(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_inter_distortions(
+ intel_sub_group_avc_ref_result_t result);
+ushort __ovld intel_sub_group_avc_sic_get_inter_distortions(
+ intel_sub_group_avc_sic_result_t result);
+
+ushort __ovld intel_sub_group_avc_ime_get_best_inter_distortion(
+ intel_sub_group_avc_ime_result_t result);
+ushort __ovld intel_sub_group_avc_ref_get_best_inter_distortion(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_major_shape(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_major_shape(
+ intel_sub_group_avc_ref_result_t result);
+uchar __ovld intel_sub_group_avc_ime_get_inter_minor_shapes(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_minor_shapes(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_directions(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_directions(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld intel_sub_group_avc_ime_get_inter_motion_vector_count(
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld intel_sub_group_avc_ref_get_inter_motion_vector_count(
+ intel_sub_group_avc_ref_result_t result);
+
+uint __ovld intel_sub_group_avc_ime_get_inter_reference_ids(
+ intel_sub_group_avc_ime_result_t result);
+uint __ovld intel_sub_group_avc_ref_get_inter_reference_ids(
+ intel_sub_group_avc_ref_result_t result);
+
+uchar __ovld
+intel_sub_group_avc_ime_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_ime_result_t result);
+uchar __ovld
+intel_sub_group_avc_ref_get_inter_reference_interlaced_field_polarities(
+ uint packed_reference_ids, uint packed_reference_parameter_field_polarities,
+ intel_sub_group_avc_ref_result_t result);
+
+// Type conversion functions
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_payload(
+ intel_sub_group_avc_ime_payload_t payload);
+intel_sub_group_avc_ime_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_payload(
+ intel_sub_group_avc_ref_payload_t payload);
+intel_sub_group_avc_ref_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+intel_sub_group_avc_mce_payload_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_payload(
+ intel_sub_group_avc_sic_payload_t payload);
+intel_sub_group_avc_sic_payload_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_payload(
+ intel_sub_group_avc_mce_payload_t payload);
+
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ime_convert_to_mce_result(
+ intel_sub_group_avc_ime_result_t result);
+intel_sub_group_avc_ime_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ime_result(
+ intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_ref_convert_to_mce_result(
+ intel_sub_group_avc_ref_result_t result);
+intel_sub_group_avc_ref_result_t __ovld
+intel_sub_group_avc_mce_convert_to_ref_result(
+ intel_sub_group_avc_mce_result_t result);
+intel_sub_group_avc_mce_result_t __ovld
+intel_sub_group_avc_sic_convert_to_mce_result(
+ intel_sub_group_avc_sic_result_t result);
+intel_sub_group_avc_sic_result_t __ovld
+intel_sub_group_avc_mce_convert_to_sic_result(
+ intel_sub_group_avc_mce_result_t result);
+#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
+#endif // cl_intel_device_side_avc_motion_estimation
+
#ifdef cl_amd_media_ops
uint __ovld amd_bitalign(uint a, uint b, uint c);
uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
diff --git a/lib/Headers/unwind.h b/lib/Headers/unwind.h
index 345fa4d0c193..0e8317e5b9d9 100644
--- a/lib/Headers/unwind.h
+++ b/lib/Headers/unwind.h
@@ -154,8 +154,12 @@ struct _Unwind_Control_Block {
struct _Unwind_Exception {
_Unwind_Exception_Class exception_class;
_Unwind_Exception_Cleanup_Fn exception_cleanup;
+#if !defined (__USING_SJLJ_EXCEPTIONS__) && defined (__SEH__)
+ _Unwind_Word private_[6];
+#else
_Unwind_Word private_1;
_Unwind_Word private_2;
+#endif
/* The Itanium ABI requires that _Unwind_Exception objects are "double-word
* aligned". GCC has interpreted this to mean "use the maximum useful
* alignment for the target"; so do we. */
diff --git a/lib/Headers/vecintrin.h b/lib/Headers/vecintrin.h
index f7061e88949f..e627389838df 100644
--- a/lib/Headers/vecintrin.h
+++ b/lib/Headers/vecintrin.h
@@ -381,7 +381,7 @@ vec_insert_and_zero(const unsigned long long *__ptr) {
static inline __ATTRS_o_ai vector float
vec_insert_and_zero(const float *__ptr) {
vector float __vec = (vector float)0;
- __vec[0] = *__ptr;
+ __vec[1] = *__ptr;
return __vec;
}
#endif
@@ -5942,13 +5942,13 @@ vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
static inline __ATTRS_o_ai vector float
vec_orc(vector float __a, vector float __b) {
- return (vector float)((vector unsigned int)__a &
+ return (vector float)((vector unsigned int)__a |
~(vector unsigned int)__b);
}
static inline __ATTRS_o_ai vector double
vec_orc(vector double __a, vector double __b) {
- return (vector double)((vector unsigned long long)__a &
+ return (vector double)((vector unsigned long long)__a |
~(vector unsigned long long)__b);
}
#endif
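
The two vec_orc hunks above restore the intended "OR with complement" semantics: each element becomes a | ~b rather than a & ~b. A minimal scalar C++ sketch of that operation, with plain unsigned integers standing in for the SystemZ vector types (the vector types and __ATTRS_o_ai attribute are omitted here):

#include <cstdint>
#include <cstdio>

// Scalar model of the corrected vec_orc: OR the first operand with the
// bitwise complement of the second.
static uint32_t orc(uint32_t a, uint32_t b) { return a | ~b; }

int main() {
  // With the old '&' this would have computed a & ~b ("AND with complement"),
  // a different operation: 0x0000ff00 instead of 0xff00ffff.
  std::printf("%08x\n", orc(0x0000ffffu, 0x00ff00ffu)); // prints ff00ffff
}
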
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index 918068a2405f..a2659119a2ff 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -720,6 +720,7 @@ void CommentASTToXMLConverter::visitBlockCommandComment(
case CommandTraits::KCI_version:
case CommandTraits::KCI_warning:
ParagraphKind = C->getCommandName(Traits);
+ break;
default:
break;
}
diff --git a/lib/Index/IndexBody.cpp b/lib/Index/IndexBody.cpp
index 786563a44a4d..54a6df2496a9 100644
--- a/lib/Index/IndexBody.cpp
+++ b/lib/Index/IndexBody.cpp
@@ -9,6 +9,7 @@
#include "IndexingContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/ASTLambda.h"
using namespace clang;
using namespace clang::index;
@@ -143,7 +144,7 @@ public:
bool VisitMemberExpr(MemberExpr *E) {
SourceLocation Loc = E->getMemberLoc();
if (Loc.isInvalid())
- Loc = E->getLocStart();
+ Loc = E->getBeginLoc();
SmallVector<SymbolRelation, 4> Relations;
SymbolRoleSet Roles = getRolesForRef(E, Relations);
return IndexCtx.handleReference(E->getMemberDecl(), Loc,
@@ -175,7 +176,7 @@ public:
return true;
SourceLocation Loc = NameInfo.getLoc();
if (Loc.isInvalid())
- Loc = E->getLocStart();
+ Loc = E->getBeginLoc();
SmallVector<SymbolRelation, 4> Relations;
SymbolRoleSet Roles = getRolesForRef(E, Relations);
return IndexCtx.handleReference(Symbols[0], Loc, Parent, ParentDC, Roles,
@@ -259,8 +260,24 @@ public:
if (isDynamic(E)) {
Roles |= (unsigned)SymbolRole::Dynamic;
- if (auto *RecD = E->getReceiverInterface())
- Relations.emplace_back((unsigned)SymbolRole::RelationReceivedBy, RecD);
+
+ auto addReceivers = [&](const ObjCObjectType *Ty) {
+ if (!Ty)
+ return;
+ if (const auto *clsD = Ty->getInterface()) {
+ Relations.emplace_back((unsigned)SymbolRole::RelationReceivedBy,
+ clsD);
+ }
+ for (const auto *protD : Ty->quals()) {
+ Relations.emplace_back((unsigned)SymbolRole::RelationReceivedBy,
+ protD);
+ }
+ };
+ QualType recT = E->getReceiverType();
+ if (const auto *Ptr = recT->getAs<ObjCObjectPointerType>())
+ addReceivers(Ptr->getObjectType());
+ else
+ addReceivers(recT->getAs<ObjCObjectType>());
}
return IndexCtx.handleReference(MD, E->getSelectorStartLoc(),
@@ -312,8 +329,8 @@ public:
SmallVector<SymbolRelation, 2> Relations;
addCallRole(Roles, Relations);
Roles |= (unsigned)SymbolRole::Implicit;
- return IndexCtx.handleReference(MD, E->getLocStart(),
- Parent, ParentDC, Roles, Relations, E);
+ return IndexCtx.handleReference(MD, E->getBeginLoc(), Parent, ParentDC,
+ Roles, Relations, E);
}
bool VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
@@ -432,12 +449,22 @@ public:
for (unsigned I = 0, E = S->getNumComponents(); I != E; ++I) {
const OffsetOfNode &Component = S->getComponent(I);
if (Component.getKind() == OffsetOfNode::Field)
- IndexCtx.handleReference(Component.getField(), Component.getLocEnd(),
+ IndexCtx.handleReference(Component.getField(), Component.getEndLoc(),
Parent, ParentDC, SymbolRoleSet(), {});
// FIXME: Try to resolve dependent field references.
}
return true;
}
+
+ bool VisitParmVarDecl(ParmVarDecl* D) {
+ // Index the parameters of lambda expression.
+ if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
+ const auto *DC = D->getDeclContext();
+ if (DC && isLambdaCallOperator(DC))
+ IndexCtx.handleDecl(D);
+ }
+ return true;
+ }
};
} // anonymous namespace
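
For reference, a small self-contained C++ example of the construct the new VisitParmVarDecl hook targets: when function-local symbols are indexed, a parameter of a lambda's call operator ('width' below) is the declaration newly reported through handleDecl(). The surrounding function is illustrative input only, not clang code.

#include <algorithm>
#include <vector>

int countWide(const std::vector<int> &xs, int limit) {
  // 'width' is a ParmVarDecl whose DeclContext is the lambda's call operator,
  // which is exactly the case the hook above now indexes.
  return static_cast<int>(std::count_if(
      xs.begin(), xs.end(), [limit](int width) { return width > limit; }));
}
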
diff --git a/lib/Index/IndexDecl.cpp b/lib/Index/IndexDecl.cpp
index 01ad3a277216..a7725f9dd97f 100644
--- a/lib/Index/IndexDecl.cpp
+++ b/lib/Index/IndexDecl.cpp
@@ -263,7 +263,7 @@ public:
} else if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(D)) {
if (auto TypeNameInfo = Dtor->getNameInfo().getNamedTypeInfo()) {
IndexCtx.handleReference(Dtor->getParent(),
- TypeNameInfo->getTypeLoc().getLocStart(),
+ TypeNameInfo->getTypeLoc().getBeginLoc(),
Dtor->getParent(), Dtor->getDeclContext());
}
} else if (const auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) {
diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp
index 03b55ffe8a4e..1cdc0984f780 100644
--- a/lib/Index/IndexSymbol.cpp
+++ b/lib/Index/IndexSymbol.cpp
@@ -96,6 +96,9 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (isFunctionLocalSymbol(D)) {
Info.Properties |= (SymbolPropertySet)SymbolProperty::Local;
}
+ if (isa<ObjCProtocolDecl>(D->getDeclContext())) {
+ Info.Properties |= (SymbolPropertySet)SymbolProperty::ProtocolInterface;
+ }
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
@@ -519,6 +522,7 @@ void index::applyForEachSymbolProperty(SymbolPropertySet Props,
APPLY_FOR_PROPERTY(IBOutletCollection);
APPLY_FOR_PROPERTY(GKInspectable);
APPLY_FOR_PROPERTY(Local);
+ APPLY_FOR_PROPERTY(ProtocolInterface);
#undef APPLY_FOR_PROPERTY
}
@@ -539,6 +543,7 @@ void index::printSymbolProperties(SymbolPropertySet Props, raw_ostream &OS) {
case SymbolProperty::IBOutletCollection: OS << "IBColl"; break;
case SymbolProperty::GKInspectable: OS << "GKI"; break;
case SymbolProperty::Local: OS << "local"; break;
+ case SymbolProperty::ProtocolInterface: OS << "protocol"; break;
}
});
}
diff --git a/lib/Index/IndexTypeSourceInfo.cpp b/lib/Index/IndexTypeSourceInfo.cpp
index 4b6bfbc67eff..85afc6345053 100644
--- a/lib/Index/IndexTypeSourceInfo.cpp
+++ b/lib/Index/IndexTypeSourceInfo.cpp
@@ -100,7 +100,8 @@ public:
bool VisitTagTypeLoc(TagTypeLoc TL) {
TagDecl *D = TL.getDecl();
- if (D->getParentFunctionOrMethod())
+ if (!IndexCtx.shouldIndexFunctionLocalSymbols() &&
+ D->getParentFunctionOrMethod())
return true;
if (TL.isDefinition()) {
@@ -130,14 +131,15 @@ public:
bool HandleTemplateSpecializationTypeLoc(TypeLocType TL) {
if (const auto *T = TL.getTypePtr()) {
if (IndexCtx.shouldIndexImplicitInstantiation()) {
- if (CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
IndexCtx.handleReference(RD, TL.getTemplateNameLoc(),
Parent, ParentDC, SymbolRoleSet(), Relations);
- } else {
- if (const TemplateDecl *D = T->getTemplateName().getAsTemplateDecl())
- IndexCtx.handleReference(D, TL.getTemplateNameLoc(),
- Parent, ParentDC, SymbolRoleSet(), Relations);
+ return true;
+ }
}
+ if (const TemplateDecl *D = T->getTemplateName().getAsTemplateDecl())
+ IndexCtx.handleReference(D, TL.getTemplateNameLoc(), Parent, ParentDC,
+ SymbolRoleSet(), Relations);
}
return true;
}
diff --git a/lib/Index/IndexingAction.cpp b/lib/Index/IndexingAction.cpp
index 16f6c21745ef..5cdec4b4528e 100644
--- a/lib/Index/IndexingAction.cpp
+++ b/lib/Index/IndexingAction.cpp
@@ -37,6 +37,7 @@ bool IndexDataConsumer::handleMacroOccurence(const IdentifierInfo *Name,
}
bool IndexDataConsumer::handleModuleOccurence(const ImportDecl *ImportD,
+ const Module *Mod,
SymbolRoleSet Roles,
SourceLocation Loc) {
return true;
@@ -215,23 +216,41 @@ static void indexTranslationUnit(ASTUnit &Unit, IndexingContext &IndexCtx) {
Unit.visitLocalTopLevelDecls(&IndexCtx, topLevelDeclVisitor);
}
+static void indexPreprocessorMacros(const Preprocessor &PP,
+ IndexDataConsumer &DataConsumer) {
+ for (const auto &M : PP.macros())
+ if (MacroDirective *MD = M.second.getLatest())
+ DataConsumer.handleMacroOccurence(
+ M.first, MD->getMacroInfo(),
+ static_cast<unsigned>(index::SymbolRole::Definition),
+ MD->getLocation());
+}
+
void index::indexASTUnit(ASTUnit &Unit, IndexDataConsumer &DataConsumer,
IndexingOptions Opts) {
IndexingContext IndexCtx(Opts, DataConsumer);
IndexCtx.setASTContext(Unit.getASTContext());
DataConsumer.initialize(Unit.getASTContext());
DataConsumer.setPreprocessor(Unit.getPreprocessorPtr());
+
+ if (Opts.IndexMacrosInPreprocessor)
+ indexPreprocessorMacros(Unit.getPreprocessor(), DataConsumer);
indexTranslationUnit(Unit, IndexCtx);
DataConsumer.finish();
}
-void index::indexTopLevelDecls(ASTContext &Ctx, ArrayRef<const Decl *> Decls,
+void index::indexTopLevelDecls(ASTContext &Ctx, Preprocessor &PP,
+ ArrayRef<const Decl *> Decls,
IndexDataConsumer &DataConsumer,
IndexingOptions Opts) {
IndexingContext IndexCtx(Opts, DataConsumer);
IndexCtx.setASTContext(Ctx);
DataConsumer.initialize(Ctx);
+
+ if (Opts.IndexMacrosInPreprocessor)
+ indexPreprocessorMacros(PP, DataConsumer);
+
for (const Decl *D : Decls)
IndexCtx.indexTopLevelDecl(D);
DataConsumer.finish();
@@ -251,6 +270,9 @@ void index::indexModuleFile(serialization::ModuleFile &Mod, ASTReader &Reader,
IndexCtx.setASTContext(Ctx);
DataConsumer.initialize(Ctx);
+ if (Opts.IndexMacrosInPreprocessor)
+ indexPreprocessorMacros(Reader.getPreprocessor(), DataConsumer);
+
for (const Decl *D : Reader.getModuleFileLevelDecls(Mod)) {
IndexCtx.indexTopLevelDecl(D);
}
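
A hedged sketch of how a client might consume the macro definitions that indexPreprocessorMacros() now surfaces. The override signature follows the declarations visible in this patch; the driver lines in the trailing comment (building the ASTUnit, etc.) are assumed boilerplate rather than part of this change.

#include "clang/Basic/IdentifierTable.h"
#include "clang/Index/IndexDataConsumer.h"
#include "clang/Index/IndexingAction.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Receives one callback per macro definition when
// IndexingOptions::IndexMacrosInPreprocessor is set.
class MacroDumper : public clang::index::IndexDataConsumer {
  bool handleMacroOccurence(const clang::IdentifierInfo *Name,
                            const clang::MacroInfo *MI,
                            clang::index::SymbolRoleSet Roles,
                            clang::SourceLocation Loc) override {
    llvm::outs() << "macro definition: " << Name->getName() << "\n";
    return true; // returning false would abort indexing
  }
};
} // namespace

// Assumed usage, given an already-built ASTUnit 'Unit':
//   MacroDumper Consumer;
//   clang::index::IndexingOptions Opts;
//   Opts.IndexMacrosInPreprocessor = true;
//   clang::index::indexASTUnit(Unit, Consumer, Opts);
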
diff --git a/lib/Index/IndexingContext.cpp b/lib/Index/IndexingContext.cpp
index 333f9dc3091b..bba6c8390b56 100644
--- a/lib/Index/IndexingContext.cpp
+++ b/lib/Index/IndexingContext.cpp
@@ -80,11 +80,27 @@ bool IndexingContext::handleReference(const NamedDecl *D, SourceLocation Loc,
RefE, RefD, DC);
}
+static void reportModuleReferences(const Module *Mod,
+ ArrayRef<SourceLocation> IdLocs,
+ const ImportDecl *ImportD,
+ IndexDataConsumer &DataConsumer) {
+ if (!Mod)
+ return;
+ reportModuleReferences(Mod->Parent, IdLocs.drop_back(), ImportD,
+ DataConsumer);
+ DataConsumer.handleModuleOccurence(ImportD, Mod,
+ (SymbolRoleSet)SymbolRole::Reference,
+ IdLocs.back());
+}
+
bool IndexingContext::importedModule(const ImportDecl *ImportD) {
+ if (ImportD->isInvalidDecl())
+ return true;
+
SourceLocation Loc;
auto IdLocs = ImportD->getIdentifierLocs();
if (!IdLocs.empty())
- Loc = IdLocs.front();
+ Loc = IdLocs.back();
else
Loc = ImportD->getLocation();
@@ -108,11 +124,17 @@ bool IndexingContext::importedModule(const ImportDecl *ImportD) {
}
}
+ const Module *Mod = ImportD->getImportedModule();
+ if (!ImportD->isImplicit() && Mod->Parent && !IdLocs.empty()) {
+ reportModuleReferences(Mod->Parent, IdLocs.drop_back(), ImportD,
+ DataConsumer);
+ }
+
SymbolRoleSet Roles = (unsigned)SymbolRole::Declaration;
if (ImportD->isImplicit())
Roles |= (unsigned)SymbolRole::Implicit;
- return DataConsumer.handleModuleOccurence(ImportD, Roles, Loc);
+ return DataConsumer.handleModuleOccurence(ImportD, Mod, Roles, Loc);
}
bool IndexingContext::isTemplateImplicitInstantiation(const Decl *D) {
diff --git a/lib/Index/SimpleFormatContext.h b/lib/Index/SimpleFormatContext.h
index 9c6d29bec329..24adcac60201 100644
--- a/lib/Index/SimpleFormatContext.h
+++ b/lib/Index/SimpleFormatContext.h
@@ -36,12 +36,10 @@ class SimpleFormatContext {
public:
SimpleFormatContext(LangOptions Options)
: DiagOpts(new DiagnosticOptions()),
- Diagnostics(new DiagnosticsEngine(new DiagnosticIDs,
- DiagOpts.get())),
- InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ Diagnostics(new DiagnosticsEngine(new DiagnosticIDs, DiagOpts.get())),
+ InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
Files(FileSystemOptions(), InMemoryFileSystem),
- Sources(*Diagnostics, Files),
- Rewrite(Sources, Options) {
+ Sources(*Diagnostics, Files), Rewrite(Sources, Options) {
Diagnostics->setClient(new IgnoringDiagConsumer, true);
}
@@ -63,7 +61,7 @@ public:
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem;
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem;
FileManager Files;
SourceManager Sources;
Rewriter Rewrite;
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index b128b69931a3..84ca753bf840 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -97,6 +97,7 @@ public:
void VisitTypedefDecl(const TypedefDecl *D);
void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
void VisitVarDecl(const VarDecl *D);
+ void VisitBindingDecl(const BindingDecl *D);
void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
@@ -269,7 +270,8 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->isStatic())
Out << 'S';
- if (unsigned quals = MD->getTypeQualifiers())
+ // FIXME: OpenCL: Need to consider address spaces
+ if (unsigned quals = MD->getTypeQualifiers().getCVRUQualifiers())
Out << (char)('0' + quals);
switch (MD->getRefQualifier()) {
case RQ_None: break;
@@ -334,6 +336,12 @@ void USRGenerator::VisitVarDecl(const VarDecl *D) {
}
}
+void USRGenerator::VisitBindingDecl(const BindingDecl *D) {
+ if (isLocal(D) && GenLoc(D, /*IncludeOffset=*/true))
+ return;
+ VisitNamedDecl(D);
+}
+
void USRGenerator::VisitNonTypeTemplateParmDecl(
const NonTypeTemplateParmDecl *D) {
GenLoc(D, /*IncludeOffset=*/true);
@@ -599,7 +607,7 @@ bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
D = D->getCanonicalDecl();
IgnoreResults =
- IgnoreResults || printLoc(Out, D->getLocStart(),
+ IgnoreResults || printLoc(Out, D->getBeginLoc(),
Context->getSourceManager(), IncludeOffset);
return IgnoreResults;
@@ -704,6 +712,9 @@ void USRGenerator::VisitType(QualType T) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
@@ -935,7 +946,7 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
case TemplateArgument::TemplateExpansion:
Out << 'P'; // pack expansion of...
- // Fall through
+ LLVM_FALLTHROUGH;
case TemplateArgument::Template:
VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
break;
@@ -1094,3 +1105,40 @@ bool clang::index::generateUSRForMacro(StringRef MacroName, SourceLocation Loc,
Out << MacroName;
return false;
}
+
+bool clang::index::generateUSRForType(QualType T, ASTContext &Ctx,
+ SmallVectorImpl<char> &Buf) {
+ if (T.isNull())
+ return true;
+ T = T.getCanonicalType();
+
+ USRGenerator UG(&Ctx, Buf);
+ UG.VisitType(T);
+ return UG.ignoreResults();
+}
+
+bool clang::index::generateFullUSRForModule(const Module *Mod,
+ raw_ostream &OS) {
+ if (!Mod->Parent)
+ return generateFullUSRForTopLevelModuleName(Mod->Name, OS);
+ if (generateFullUSRForModule(Mod->Parent, OS))
+ return true;
+ return generateUSRFragmentForModule(Mod, OS);
+}
+
+bool clang::index::generateFullUSRForTopLevelModuleName(StringRef ModName,
+ raw_ostream &OS) {
+ OS << getUSRSpacePrefix();
+ return generateUSRFragmentForModuleName(ModName, OS);
+}
+
+bool clang::index::generateUSRFragmentForModule(const Module *Mod,
+ raw_ostream &OS) {
+ return generateUSRFragmentForModuleName(Mod->Name, OS);
+}
+
+bool clang::index::generateUSRFragmentForModuleName(StringRef ModName,
+ raw_ostream &OS) {
+ OS << "@M@" << ModName;
+ return false;
+}
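
The module-USR helpers added above compose a USR by recursing from the top-level module down, emitting one "@M@<Name>" fragment per module, with generateFullUSRForTopLevelModuleName prepending the USR space prefix. A standalone sketch of the resulting string format -- not the clang API itself, and assuming the usual "c:" prefix returned by getUSRSpacePrefix():

#include <iostream>
#include <string>
#include <vector>

// Mimics the fragment scheme: {"Top", "Sub"} -> "c:@M@Top@M@Sub".
static std::string usrForModulePath(const std::vector<std::string> &Path) {
  std::string USR = "c:";               // assumed USR space prefix
  for (const std::string &Name : Path)  // top-level module first, then children
    USR += "@M@" + Name;
  return USR;
}

int main() { std::cout << usrForModulePath({"Top", "Sub"}) << "\n"; }
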
diff --git a/lib/Lex/CMakeLists.txt b/lib/Lex/CMakeLists.txt
index 38df144adf9a..7888b15cb66e 100644
--- a/lib/Lex/CMakeLists.txt
+++ b/lib/Lex/CMakeLists.txt
@@ -17,7 +17,6 @@ add_clang_library(clangLex
PPExpressions.cpp
PPLexerChange.cpp
PPMacroExpansion.cpp
- PTHLexer.cpp
Pragma.cpp
PreprocessingRecord.cpp
Preprocessor.cpp
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
index 24a14b6cdb57..23cb053c2d71 100644
--- a/lib/Lex/HeaderMap.cpp
+++ b/lib/Lex/HeaderMap.cpp
@@ -48,7 +48,8 @@ static inline unsigned HashHMapKey(StringRef Str) {
/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
/// If it looks like a HeaderMap but is obviously corrupted, it puts a reason
/// into the string error argument and returns null.
-const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
+std::unique_ptr<HeaderMap> HeaderMap::Create(const FileEntry *FE,
+ FileManager &FM) {
// If the file is too small to be a header map, ignore it.
unsigned FileSize = FE->getSize();
if (FileSize <= sizeof(HMapHeader)) return nullptr;
@@ -59,7 +60,7 @@ const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
bool NeedsByteSwap;
if (!checkHeader(**FileBuffer, NeedsByteSwap))
return nullptr;
- return new HeaderMap(std::move(*FileBuffer), NeedsByteSwap);
+ return std::unique_ptr<HeaderMap>(new HeaderMap(std::move(*FileBuffer), NeedsByteSwap));
}
bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index fbfa54b2fffd..c65fb47c0fe5 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderMap.h"
@@ -35,6 +34,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -75,12 +75,6 @@ HeaderSearch::HeaderSearch(std::shared_ptr<HeaderSearchOptions> HSOpts,
FileMgr(SourceMgr.getFileManager()), FrameworkMap(64),
ModMap(SourceMgr, Diags, LangOpts, Target, *this) {}
-HeaderSearch::~HeaderSearch() {
- // Delete headermaps.
- for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
- delete HeaderMaps[i].second;
-}
-
void HeaderSearch::PrintStats() {
fprintf(stderr, "\n*** HeaderSearch Stats:\n");
fprintf(stderr, "%d files tracked.\n", (int)FileInfo.size());
@@ -113,12 +107,12 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
// Pointer equality comparison of FileEntries works because they are
// already uniqued by inode.
if (HeaderMaps[i].first == FE)
- return HeaderMaps[i].second;
+ return HeaderMaps[i].second.get();
}
- if (const HeaderMap *HM = HeaderMap::Create(FE, FileMgr)) {
- HeaderMaps.push_back(std::make_pair(FE, HM));
- return HM;
+ if (std::unique_ptr<HeaderMap> HM = HeaderMap::Create(FE, FileMgr)) {
+ HeaderMaps.emplace_back(FE, std::move(HM));
+ return HeaderMaps.back().second.get();
}
return nullptr;
@@ -654,7 +648,7 @@ static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
++I;
}
- return FoundComp >= 2;
+ return !FrameworkName.empty() && FoundComp >= 2;
}
static void
@@ -1577,20 +1571,21 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
DirNative);
// Search each of the ".framework" directories to load them as modules.
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (llvm::sys::path::extension(Dir->getName()) != ".framework")
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
continue;
const DirectoryEntry *FrameworkDir =
- FileMgr.getDirectory(Dir->getName());
+ FileMgr.getDirectory(Dir->path());
if (!FrameworkDir)
continue;
// Load this framework module.
- loadFrameworkModule(llvm::sys::path::stem(Dir->getName()),
- FrameworkDir, IsSystem);
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
}
continue;
}
@@ -1643,15 +1638,16 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
return;
std::error_code EC;
+ SmallString<128> Dir = SearchDir.getDir()->getName();
+ FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
- llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ llvm::sys::path::native(Dir, DirNative);
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- bool IsFramework =
- llvm::sys::path::extension(Dir->getName()) == ".framework";
+ bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
if (IsFramework == SearchDir.isFramework())
- loadModuleMapFile(Dir->getName(), SearchDir.isSystemHeaderDirectory(),
+ loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
SearchDir.isFramework());
}
@@ -1682,9 +1678,8 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
StringRef Dir = SearchDirs[I].getDir()->getName();
llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
if (!WorkingDir.empty() && !path::is_absolute(Dir)) {
- auto err = fs::make_absolute(WorkingDir, DirPath);
- if (!err)
- path::remove_dots(DirPath, /*remove_dot_dot=*/true);
+ fs::make_absolute(WorkingDir, DirPath);
+ path::remove_dots(DirPath, /*remove_dot_dot=*/true);
Dir = DirPath;
}
for (auto NI = path::begin(File), NE = path::end(File),
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index e8588a771a43..d4723091114a 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -1015,7 +1015,7 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
StringRef Lexer::getImmediateMacroNameForDiagnostics(
SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
- // Walk past macro argument expanions.
+ // Walk past macro argument expansions.
while (SM.isMacroArgExpansion(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
@@ -1510,8 +1510,17 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
bool operator<(HomoglyphPair R) const { return Character < R.Character; }
};
static constexpr HomoglyphPair SortedHomoglyphs[] = {
+ {U'\u00ad', 0}, // SOFT HYPHEN
{U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
{U'\u037e', ';'}, // GREEK QUESTION MARK
+ {U'\u200b', 0}, // ZERO WIDTH SPACE
+ {U'\u200c', 0}, // ZERO WIDTH NON-JOINER
+ {U'\u200d', 0}, // ZERO WIDTH JOINER
+ {U'\u2060', 0}, // WORD JOINER
+ {U'\u2061', 0}, // FUNCTION APPLICATION
+ {U'\u2062', 0}, // INVISIBLE TIMES
+ {U'\u2063', 0}, // INVISIBLE SEPARATOR
+ {U'\u2064', 0}, // INVISIBLE PLUS
{U'\u2212', '-'}, // MINUS SIGN
{U'\u2215', '/'}, // DIVISION SLASH
{U'\u2216', '\\'}, // SET MINUS
@@ -1521,6 +1530,7 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
{U'\u2236', ':'}, // RATIO
{U'\u223c', '~'}, // TILDE OPERATOR
{U'\ua789', ':'}, // MODIFIER LETTER COLON
+ {U'\ufeff', 0}, // ZERO WIDTH NO-BREAK SPACE
{U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
{U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
{U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
@@ -1560,9 +1570,14 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
llvm::raw_svector_ostream CharOS(CharBuf);
llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
}
- const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
- Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
- << Range << CharBuf << LooksLikeStr;
+ if (Homoglyph->LooksLike) {
+ const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
+ Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
+ << Range << CharBuf << LooksLikeStr;
+ } else {
+ Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
+ << Range << CharBuf;
+ }
}
}
@@ -1881,6 +1896,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
/// either " or L" or u8" or u" or U".
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
tok::TokenKind Kind) {
+ const char *AfterQuote = CurPtr;
// Does this string contain the \0 character?
const char *NulCharacter = nullptr;
@@ -1909,8 +1925,11 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
if (C == 0) {
if (isCodeCompletionPoint(CurPtr-1)) {
- PP->CodeCompleteNaturalLanguage();
- FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ if (ParsingFilename)
+ codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
+ else
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
cutOffLexing();
return true;
}
@@ -2028,9 +2047,8 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
- if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
- isCodeCompletionPoint(CurPtr-1)))) {
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
// If the filename is unterminated, then it must just be a lone <
// character. Return this as such.
FormTokenWithChars(Result, AfterLessPos, tok::less);
@@ -2038,6 +2056,12 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
}
if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr - 1)) {
+ codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
+ cutOffLexing();
+ FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
+ return true;
+ }
NulCharacter = CurPtr-1;
}
C = getAndAdvanceChar(CurPtr, Result);
@@ -2054,6 +2078,34 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
return true;
}
+void Lexer::codeCompleteIncludedFile(const char *PathStart,
+ const char *CompletionPoint,
+ bool IsAngled) {
+ // Completion only applies to the filename, after the last slash.
+ StringRef PartialPath(PathStart, CompletionPoint - PathStart);
+ auto Slash = PartialPath.find_last_of(LangOpts.MSVCCompat ? "/\\" : "/");
+ StringRef Dir =
+ (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
+ const char *StartOfFilename =
+ (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
+ // Code completion filter range is the filename only, up to completion point.
+ PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
+ StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
+ // We should replace the characters up to the closing quote, if any.
+ while (CompletionPoint < BufferEnd) {
+ char Next = *(CompletionPoint + 1);
+ if (Next == 0 || Next == '\r' || Next == '\n')
+ break;
+ ++CompletionPoint;
+ if (Next == (IsAngled ? '>' : '"'))
+ break;
+ }
+ PP->setCodeCompletionTokenRange(
+ FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
+ FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
+ PP->CodeCompleteIncludedFile(Dir, IsAngled);
+}
+
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
@@ -3033,6 +3085,8 @@ bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
makeCharRange(*this, BufferPtr, CurPtr),
/*IsFirst=*/true);
+ maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr));
}
MIOpt.ReadToken();
@@ -3790,7 +3844,7 @@ LexNextToken:
case '@':
// Objective C support.
- if (CurPtr[-1] == '@' && LangOpts.ObjC1)
+ if (CurPtr[-1] == '@' && LangOpts.ObjC)
Kind = tok::at;
else
Kind = tok::unknown;
@@ -3827,7 +3881,6 @@ LexNextToken:
// We can't just reset CurPtr to BufferPtr because BufferPtr may point to
// an escaped newline.
--CurPtr;
- const char *UTF8StartPtr = CurPtr;
llvm::ConversionResult Status =
llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
(const llvm::UTF8 *)BufferEnd,
@@ -3842,9 +3895,6 @@ LexNextToken:
// (We manually eliminate the tail call to avoid recursion.)
goto LexNextToken;
}
- if (!isLexingRawMode())
- maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
- makeCharRange(*this, UTF8StartPtr, CurPtr));
return LexUnicode(Result, CodePoint, CurPtr);
}
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index 3f2af1a74e5a..fa0815eb9c6c 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -693,7 +693,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
break;
}
}
- // fall through.
+ LLVM_FALLTHROUGH;
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 4ed69ecc465d..434c12007596 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -200,7 +200,8 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
}
const MacroDirective::DefInfo
-MacroDirective::findDirectiveAtLoc(SourceLocation L, SourceManager &SM) const {
+MacroDirective::findDirectiveAtLoc(SourceLocation L,
+ const SourceManager &SM) const {
assert(L.isValid() && "SourceLocation is invalid.");
for (DefInfo Def = getDefinition(); Def; Def = Def.getPreviousDefinition()) {
if (Def.getLocation().isInvalid() || // For macros defined on the command line.
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index 87749f74734c..cff950b703a6 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -22,7 +22,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -43,6 +42,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -54,6 +54,8 @@
using namespace clang;
+void ModuleMapCallbacks::anchor() {}
+
void ModuleMap::resolveLinkAsDependencies(Module *Mod) {
auto PendingLinkAs = PendingLinkAsModule.find(Mod->Name);
if (PendingLinkAs != PendingLinkAsModule.end()) {
@@ -523,7 +525,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// At this point, only non-modular includes remain.
- if (LangOpts.ModulesStrictDeclUse) {
+ if (RequestingModule && LangOpts.ModulesStrictDeclUse) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
<< RequestingModule->getTopLevelModule()->Name << Filename;
} else if (RequestingModule && RequestingModuleIsModuleInterface &&
@@ -806,12 +808,11 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
}
Module *ModuleMap::createGlobalModuleForInterfaceUnit(SourceLocation Loc) {
- assert(!PendingGlobalModule && "created multiple global modules");
- PendingGlobalModule.reset(
+ PendingSubmodules.emplace_back(
new Module("<global>", Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ true, NumCreatedModules++));
- PendingGlobalModule->Kind = Module::GlobalModuleFragment;
- return PendingGlobalModule.get();
+ PendingSubmodules.back()->Kind = Module::GlobalModuleFragment;
+ return PendingSubmodules.back().get();
}
Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
@@ -827,10 +828,11 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
Modules[Name] = SourceModule = Result;
// Reparent the current global module fragment as a submodule of this module.
- assert(GlobalModule == PendingGlobalModule.get() &&
- "unexpected global module");
- GlobalModule->setParent(Result);
- PendingGlobalModule.release(); // now owned by parent
+ for (auto &Submodule : PendingSubmodules) {
+ Submodule->setParent(Result);
+ Submodule.release(); // now owned by parent
+ }
+ PendingSubmodules.clear();
// Mark the main source file as being within the newly-created module so that
// declarations and macros are properly visibility-restricted to it.
@@ -841,6 +843,29 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
return Result;
}
+Module *ModuleMap::createHeaderModule(StringRef Name,
+ ArrayRef<Module::Header> Headers) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result =
+ new Module(Name, SourceLocation(), nullptr, /*IsFramework*/ false,
+ /*IsExplicit*/ false, NumCreatedModules++);
+ Result->Kind = Module::ModuleInterfaceUnit;
+ Modules[Name] = SourceModule = Result;
+
+ for (const Module::Header &H : Headers) {
+ auto *M = new Module(H.NameAsWritten, SourceLocation(), Result,
+ /*IsFramework*/ false,
+ /*IsExplicit*/ true, NumCreatedModules++);
+ // Header modules are implicitly 'export *'.
+ M->Exports.push_back(Module::ExportDecl(nullptr, true));
+ addHeader(M, H, NormalHeader);
+ }
+
+ return Result;
+}
+
/// For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
@@ -997,15 +1022,16 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
= StringRef(FrameworkDir->getName());
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
- for (vfs::directory_iterator Dir = FS.dir_begin(SubframeworksDirName, EC),
- DirEnd;
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (llvm::vfs::directory_iterator
+ Dir = FS.dir_begin(SubframeworksDirName, EC),
+ DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (!StringRef(Dir->getName()).endswith(".framework"))
+ if (!StringRef(Dir->path()).endswith(".framework"))
continue;
if (const DirectoryEntry *SubframeworkDir =
- FileMgr.getDirectory(Dir->getName())) {
+ FileMgr.getDirectory(Dir->path())) {
// Note: as an egregious but useful hack, we use the real path here and
// check whether it is actually a subdirectory of the parent directory.
// This will not be the case if the 'subframework' is actually a symlink
@@ -2371,13 +2397,13 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
// uncommonly used Tcl module on Darwin platforms.
std::error_code EC;
SmallVector<Module::Header, 6> Headers;
- vfs::FileSystem &FS = *SourceMgr.getFileManager().getVirtualFileSystem();
- for (vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
+ llvm::vfs::FileSystem &FS =
+ *SourceMgr.getFileManager().getVirtualFileSystem();
+ for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
- if (const FileEntry *FE =
- SourceMgr.getFileManager().getFile(I->getName())) {
+ if (const FileEntry *FE = SourceMgr.getFileManager().getFile(I->path())) {
- Module::Header Header = {I->getName(), FE};
+ Module::Header Header = {I->path(), FE};
Headers.push_back(std::move(Header));
}
}
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 66a9faa6e60a..d62a3513c777 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
@@ -119,7 +118,7 @@ static bool isReservedId(StringRef Text, const LangOptions &Lang) {
// the specified module, meaning clang won't build the specified module. This is
// useful in a number of situations, for instance, when building a library that
// vends a module map, one might want to avoid hitting intermediate build
-// products containig the the module map or avoid finding the system installed
+// products containing the the module map or avoid finding the system installed
// modulemap for that library.
static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
StringRef ModuleName) {
@@ -383,11 +382,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/ false,
FoundNonSkipPortion, FoundElse);
- if (CurPTHLexer) {
- PTHSkipExcludedConditionalBlock();
- return;
- }
-
// Enter raw mode to disable identifier lookup (and thus macro expansion),
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
@@ -405,7 +399,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If this is the end of the buffer, we have an error.
if (Tok.is(tok::eof)) {
// We don't emit errors for unterminated conditionals here,
- // Lexer::LexEndOfFile can do that propertly.
+ // Lexer::LexEndOfFile can do that properly.
// Just return and let the caller lex after this #include.
if (PreambleConditionalStack.isRecording())
PreambleConditionalStack.SkipInfo.emplace(
@@ -585,83 +579,6 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
Tok.getLocation());
}
-void Preprocessor::PTHSkipExcludedConditionalBlock() {
- while (true) {
- assert(CurPTHLexer);
- assert(CurPTHLexer->LexingRawMode == false);
-
- // Skip to the next '#else', '#elif', or #endif.
- if (CurPTHLexer->SkipBlock()) {
- // We have reached an #endif. Both the '#' and 'endif' tokens
- // have been consumed by the PTHLexer. Just pop off the condition level.
- PPConditionalInfo CondInfo;
- bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
- (void)InCond; // Silence warning in no-asserts mode.
- assert(!InCond && "Can't be skipping if not in a conditional!");
- break;
- }
-
- // We have reached a '#else' or '#elif'. Lex the next token to get
- // the directive flavor.
- Token Tok;
- LexUnexpandedToken(Tok);
-
- // We can actually look up the IdentifierInfo here since we aren't in
- // raw mode.
- tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
-
- if (K == tok::pp_else) {
- // #else: Enter the else condition. We aren't in a nested condition
- // since we skip those. We're always in the one matching the last
- // blocked we skipped.
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
- // Note that we've seen a #else in this conditional.
- CondInfo.FoundElse = true;
-
- // If the #if block wasn't entered then enter the #else block now.
- if (!CondInfo.FoundNonSkip) {
- CondInfo.FoundNonSkip = true;
-
- // Scan until the eod token.
- CurPTHLexer->ParsingPreprocessorDirective = true;
- DiscardUntilEndOfDirective();
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- break;
- }
-
- // Otherwise skip this block.
- continue;
- }
-
- assert(K == tok::pp_elif);
- PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
-
- // If this is a #elif with a #else before it, report the error.
- if (CondInfo.FoundElse)
- Diag(Tok, diag::pp_err_elif_after_else);
-
- // If this is in a skipping block or if we're already handled this #if
- // block, don't bother parsing the condition. We just skip this block.
- if (CondInfo.FoundNonSkip)
- continue;
-
- // Evaluate the condition of the #elif.
- IdentifierInfo *IfNDefMacro = nullptr;
- CurPTHLexer->ParsingPreprocessorDirective = true;
- bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro).Conditional;
- CurPTHLexer->ParsingPreprocessorDirective = false;
-
- // If this condition is true, enter it!
- if (ShouldEnter) {
- CondInfo.FoundNonSkip = true;
- break;
- }
-
- // Otherwise, skip this block and go to the next one.
- }
-}
-
Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
if (!SourceMgr.isInMainFile(Loc)) {
// Try to determine the module of the include directive.
@@ -690,7 +607,7 @@ Preprocessor::getModuleHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
return nullptr;
Module *TopM = M->getTopLevelModule();
@@ -887,18 +804,29 @@ private:
bool save;
};
-/// Process a directive while looking for the through header.
-/// Only #include (to check if it is the through header) and #define (to warn
-/// about macros that don't match the PCH) are handled. All other directives
-/// are completely discarded.
-void Preprocessor::HandleSkippedThroughHeaderDirective(Token &Result,
+/// Process a directive while looking for the through header or a #pragma
+/// hdrstop. The following directives are handled:
+/// #include (to check if it is the through header)
+/// #define (to warn about macros that don't match the PCH)
+/// #pragma (to check for pragma hdrstop).
+/// All other directives are completely discarded.
+void Preprocessor::HandleSkippedDirectiveWhileUsingPCH(Token &Result,
SourceLocation HashLoc) {
if (const IdentifierInfo *II = Result.getIdentifierInfo()) {
- if (II->getPPKeywordID() == tok::pp_include)
- return HandleIncludeDirective(HashLoc, Result);
- if (II->getPPKeywordID() == tok::pp_define)
+ if (II->getPPKeywordID() == tok::pp_define) {
return HandleDefineDirective(Result,
/*ImmediatelyAfterHeaderGuard=*/false);
+ }
+ if (SkippingUntilPCHThroughHeader &&
+ II->getPPKeywordID() == tok::pp_include) {
+ return HandleIncludeDirective(HashLoc, Result);
+ }
+ if (SkippingUntilPragmaHdrStop && II->getPPKeywordID() == tok::pp_pragma) {
+ Token P = LookAhead(0);
+ auto *II = P.getIdentifierInfo();
+ if (II && II->getName() == "hdrstop")
+ return HandlePragmaDirective(HashLoc, PIK_HashPragma);
+ }
}
DiscardUntilEndOfDirective();
}
@@ -964,8 +892,8 @@ void Preprocessor::HandleDirective(Token &Result) {
// and reset to previous state when returning from this function.
ResetMacroExpansionHelper helper(this);
- if (SkippingUntilPCHThroughHeader)
- return HandleSkippedThroughHeaderDirective(Result, SavedHash.getLocation());
+ if (SkippingUntilPCHThroughHeader || SkippingUntilPragmaHdrStop)
+ return HandleSkippedDirectiveWhileUsingPCH(Result, SavedHash.getLocation());
switch (Result.getKind()) {
case tok::eod:
@@ -1376,10 +1304,6 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
///
void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
bool isWarning) {
- // PTH doesn't emit #warning or #error directives.
- if (CurPTHLexer)
- return CurPTHLexer->DiscardToEndOfLine();
-
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
@@ -1618,7 +1542,7 @@ static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
SourceLocation PathEnd) {
- assert(PP.getLangOpts().ObjC2 && "no import syntax available");
+ assert(PP.getLangOpts().ObjC && "no import syntax available");
SmallString<128> PathString;
for (size_t I = 0, N = Path.size(); I != N; ++I) {
@@ -1783,6 +1707,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Check that we don't have infinite #include recursion.
if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
Diag(FilenameTok, diag::err_pp_include_too_deep);
+ HasReachedMaxIncludeDepth = true;
return;
}
@@ -1868,15 +1793,58 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
if (File) {
SourceRange Range(FilenameTok.getLocation(), CharEnd);
- Diag(FilenameTok, diag::err_pp_file_not_found_not_fatal) <<
+ Diag(FilenameTok, diag::err_pp_file_not_found_angled_include_not_fatal) <<
Filename <<
FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\"");
}
}
+  // Check for likely typos due to leading or trailing non-alphanumeric
+  // characters.
+ StringRef OriginalFilename = Filename;
+ if (LangOpts.SpellChecking && !File) {
+ // A heuristic to correct a typo file name by removing leading and
+ // trailing non-isAlphanumeric characters.
+ auto CorrectTypoFilename = [](llvm::StringRef Filename) {
+ Filename = Filename.drop_until(isAlphanumeric);
+ while (!Filename.empty() && !isAlphanumeric(Filename.back())) {
+ Filename = Filename.drop_back();
+ }
+ return Filename;
+ };
+ StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
+ SmallString<128> NormalizedTypoCorrectionPath;
+ if (LangOpts.MSVCCompat) {
+ NormalizedTypoCorrectionPath = TypoCorrectionName.str();
+#ifndef _WIN32
+ llvm::sys::path::native(NormalizedTypoCorrectionPath);
+#endif
+ }
+ File = LookupFile(
+ FilenameLoc,
+ LangOpts.MSVCCompat ? NormalizedTypoCorrectionPath.c_str()
+ : TypoCorrectionName,
+ isAngled, LookupFrom, LookupFromFile, CurDir,
+ Callbacks ? &SearchPath : nullptr,
+ Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped);
+ if (File) {
+ SourceRange Range(FilenameTok.getLocation(), CharEnd);
+ auto Hint = isAngled
+ ? FixItHint::CreateReplacement(
+ Range, "<" + TypoCorrectionName.str() + ">")
+ : FixItHint::CreateReplacement(
+ Range, "\"" + TypoCorrectionName.str() + "\"");
+ Diag(FilenameTok, diag::err_pp_file_not_found_typo_not_fatal)
+ << OriginalFilename << TypoCorrectionName << Hint;
+ // We found the file, so set the Filename to the name after typo
+ // correction.
+ Filename = TypoCorrectionName;
+ }
+ }
+
// If the file is still not found, just go with the vanilla diagnostic
if (!File)
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename
+ Diag(FilenameTok, diag::err_pp_file_not_found) << OriginalFilename
<< FilenameRange;
}
}
@@ -1896,10 +1864,11 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (PPOpts->SingleFileParseMode)
ShouldEnter = false;
- // Any diagnostics after the fatal error will not be visible. As the
- // compilation failed already and errors in subsequently included files won't
- // be visible, avoid preprocessing those files.
- if (ShouldEnter && Diags->hasFatalErrorOccurred())
+ // If we've reached the max allowed include depth, it is usually due to an
+ // include cycle. Don't enter already processed files again as it can lead to
+ // reaching the max allowed include depth again.
+ if (ShouldEnter && HasReachedMaxIncludeDepth && File &&
+ HeaderInfo.getFileInfo(File).NumIncludes)
ShouldEnter = false;
// Determine whether we should try to import the module for this #include, if
@@ -1932,7 +1901,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Warn that we're replacing the include/import with a module import.
// We only do this in Objective-C, where we have a module-import syntax.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd);
// Load the module to import its macros. We'll make the declarations
@@ -1961,14 +1930,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
if (hadModuleLoaderFatalFailure()) {
// With a fatal failure in the module loader, we abort parsing.
Token &Result = IncludeTok;
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer->cutOffLexing();
- } else {
- assert(CurPTHLexer && "#include but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- }
+ assert(CurLexer && "#include but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
}
return;
}
@@ -2169,7 +2134,7 @@ void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
///
void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
Token &ImportTok) {
- if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (!LangOpts.ObjC) { // #import is standard for ObjC.
if (LangOpts.MSVCCompat)
return HandleMicrosoftImportDirective(ImportTok);
Diag(ImportTok, diag::ext_pp_import_directive);
@@ -2640,7 +2605,7 @@ void Preprocessor::HandleDefineDirective(
II->isStr("__unsafe_unretained") ||
II->isStr("__autoreleasing");
};
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
SourceMgr.getFileID(OtherMI->getDefinitionLoc())
== getPredefinesFileID() &&
isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
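The typo-correction fallback added above retries the header lookup after trimming leading and trailing non-alphanumeric characters from the spelled filename. For reference, a minimal standalone sketch of the same heuristic using only the standard library (the sample inputs and output are illustrative, not taken from the patch):

    #include <cctype>
    #include <iostream>
    #include <string_view>

    // Drop leading and trailing characters that are not alphanumeric,
    // mirroring the CorrectTypoFilename lambda in the hunk above.
    static std::string_view correctTypoFilename(std::string_view Name) {
      while (!Name.empty() && !std::isalnum(static_cast<unsigned char>(Name.front())))
        Name.remove_prefix(1);
      while (!Name.empty() && !std::isalnum(static_cast<unsigned char>(Name.back())))
        Name.remove_suffix(1);
      return Name;
    }

    int main() {
      // e.g. "iostream>" -> "iostream", "*stdio.h," -> "stdio.h"
      for (std::string_view S : {"iostream>", "*stdio.h,", "vector"})
        std::cout << S << " -> " << correctTypoFilename(S) << '\n';
    }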
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 6631b13b1583..e321dd38fed6 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -19,7 +19,6 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroInfo.h"
-#include "clang/Lex/PTHManager.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -76,13 +75,6 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
if (MaxIncludeStackDepth < IncludeMacroStack.size())
MaxIncludeStackDepth = IncludeMacroStack.size();
- if (PTH) {
- if (PTHLexer *PL = PTH->CreateLexer(FID)) {
- EnterSourceFileWithPTH(PL, CurDir);
- return false;
- }
- }
-
// Get the MemoryBuffer for this FID, if it fails, we fail.
bool Invalid = false;
const llvm::MemoryBuffer *InputFile =
@@ -131,31 +123,6 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
}
}
-/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
-/// and start getting tokens from it using the PTH cache.
-void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
- const DirectoryLookup *CurDir) {
-
- if (CurPPLexer || CurTokenLexer)
- PushIncludeMacroStack();
-
- CurDirLookup = CurDir;
- CurPTHLexer.reset(PL);
- CurPPLexer = CurPTHLexer.get();
- CurLexerSubmodule = nullptr;
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_PTHLexer;
-
- // Notify the client, if desired, that we are in a new source file.
- if (Callbacks) {
- FileID FID = CurPPLexer->getFileID();
- SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
- SrcMgr::CharacteristicKind FileType =
- SourceMgr.getFileCharacteristic(EnterLoc);
- Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
- }
-}
-
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
@@ -304,20 +271,21 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
- vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ llvm::vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
std::error_code EC;
- for (vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC), End;
+ for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
+ End;
Entry != End && !EC; Entry.increment(EC)) {
using llvm::StringSwitch;
// Check whether this entry has an extension typically associated with
// headers.
- if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->getName()))
+ if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
.Cases(".h", ".H", ".hh", ".hpp", true)
.Default(false))
continue;
- if (const FileEntry *Header = getFileManager().getFile(Entry->getName()))
+ if (const FileEntry *Header = getFileManager().getFile(Entry->path()))
if (!getSourceManager().hasFileInfo(Header)) {
if (!ModMap.isHeaderInUnavailableModule(Header)) {
// Find the relative path that would access this header.
@@ -339,7 +307,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
// If we have an unclosed module region from a pragma at the end of a
// module, complain and close it now.
- // FIXME: This is not correct if we are building a module from PTH.
const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
if ((LeavingSubmodule || IncludeMacroStack.empty()) &&
!BuildingSubmoduleStack.empty() &&
@@ -436,15 +403,10 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (isCodeCompletionEnabled() && CurPPLexer &&
SourceMgr.getLocForStartOfFile(CurPPLexer->getFileID()) ==
CodeCompletionFileLoc) {
- if (CurLexer) {
- Result.startToken();
- CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer.reset();
CurPPLexer = nullptr;
recomputeCurLexerKind();
@@ -522,39 +484,34 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
}
// If this is the end of the main file, form an EOF token.
- if (CurLexer) {
- const char *EndPos = getCurLexerEndPos();
- Result.startToken();
- CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
-
- if (isCodeCompletionEnabled()) {
- // Inserting the code-completion point increases the source buffer by 1,
- // but the main FileID was created before inserting the point.
- // Compensate by reducing the EOF location by 1, otherwise the location
- // will point to the next FileID.
- // FIXME: This is hacky, the code-completion point should probably be
- // inserted before the main FileID is created.
- if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
- Result.setLocation(Result.getLocation().getLocWithOffset(-1));
- }
-
- if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
- // Reached the end of the compilation without finding the through header.
- Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
- << PPOpts->PCHThroughHeader << 0;
- }
+ assert(CurLexer && "Got EOF but no current lexer set!");
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (isCodeCompletionEnabled()) {
+ // Inserting the code-completion point increases the source buffer by 1,
+ // but the main FileID was created before inserting the point.
+ // Compensate by reducing the EOF location by 1, otherwise the location
+ // will point to the next FileID.
+ // FIXME: This is hacky, the code-completion point should probably be
+ // inserted before the main FileID is created.
+ if (CurLexer->getFileLoc() == CodeCompletionFileLoc)
+ Result.setLocation(Result.getLocation().getLocWithOffset(-1));
+ }
- if (!isIncrementalProcessingEnabled())
- // We're done with lexing.
- CurLexer.reset();
- } else {
- assert(CurPTHLexer && "Got EOF but no current lexer set!");
- CurPTHLexer->getEOF(Result);
- CurPTHLexer.reset();
+ if (creatingPCHWithThroughHeader() && !LeavingPCHThroughHeader) {
+ // Reached the end of the compilation without finding the through header.
+ Diag(CurLexer->getFileLoc(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 0;
}
if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
+
+ if (!isIncrementalProcessingEnabled())
CurPPLexer = nullptr;
if (TUKind == TU_Complete) {
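The umbrella-directory scan in diagnoseMissingHeaderInUmbrellaDir now goes through the llvm::vfs namespace, following the VFS layer's move into LLVM. A small standalone sketch of the same iteration pattern against the real file system, assuming the llvm/Support/VirtualFileSystem.h header location:

    #include "llvm/Support/Path.h"
    #include "llvm/Support/VirtualFileSystem.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // Walk the current directory recursively and print header-like files,
      // mirroring the loop in the hunk above.
      llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
          llvm::vfs::getRealFileSystem();
      std::error_code EC;
      for (llvm::vfs::recursive_directory_iterator I(*FS, ".", EC), E;
           I != E && !EC; I.increment(EC))
        if (llvm::sys::path::extension(I->path()) == ".h")
          llvm::errs() << I->path() << "\n";
    }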
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 346dd947c028..c70ff46ec904 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -23,12 +23,12 @@
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
@@ -428,8 +428,6 @@ bool Preprocessor::isNextPPTokenLParen() {
unsigned Val;
if (CurLexer)
Val = CurLexer->isNextPPTokenLParen();
- else if (CurPTHLexer)
- Val = CurPTHLexer->isNextPPTokenLParen();
else
Val = CurTokenLexer->isNextTokenLParen();
@@ -442,8 +440,6 @@ bool Preprocessor::isNextPPTokenLParen() {
for (const IncludeStackInfo &Entry : llvm::reverse(IncludeMacroStack)) {
if (Entry.TheLexer)
Val = Entry.TheLexer->isNextPPTokenLParen();
- else if (Entry.ThePTHLexer)
- Val = Entry.ThePTHLexer->isNextPPTokenLParen();
else
Val = Entry.TheTokenLexer->isNextTokenLParen();
@@ -1242,6 +1238,13 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
CurDir, nullptr, nullptr, nullptr, nullptr);
+ if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
+ SrcMgr::CharacteristicKind FileType = SrcMgr::C_User;
+ if (File)
+ FileType = PP.getHeaderSearchInfo().getFileDirFlavor(File);
+ Callbacks->HasInclude(FilenameLoc, Filename, isAngled, File, FileType);
+ }
+
// Get the result value. A result of true means the file exists.
return File != nullptr;
}
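__has_include and __has_include_next checks are now reported to PPCallbacks clients. A sketch of an observer, assuming the HasInclude signature implied by the call in the hunk above (Loc, FileName, IsAngled, File, FileType):

    #include "clang/Basic/SourceManager.h"
    #include "clang/Lex/PPCallbacks.h"
    #include "llvm/Support/raw_ostream.h"

    class HasIncludeLogger : public clang::PPCallbacks {
      void HasInclude(clang::SourceLocation Loc, llvm::StringRef FileName,
                      bool IsAngled, const clang::FileEntry *File,
                      clang::SrcMgr::CharacteristicKind FileType) override {
        // Log each __has_include query and whether the lookup succeeded.
        llvm::errs() << "__has_include(" << (IsAngled ? '<' : '"') << FileName
                     << (IsAngled ? '>' : '"') << ") -> "
                     << (File ? "found" : "not found") << "\n";
      }
    };
    // Registered like any other callback, e.g.
    //   PP.addPPCallbacks(llvm::make_unique<HasIncludeLogger>());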
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
deleted file mode 100644
index 45cff56dcaa1..000000000000
--- a/lib/Lex/PTHLexer.cpp
+++ /dev/null
@@ -1,748 +0,0 @@
-//===- PTHLexer.cpp - Lex from a token stream -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the PTHLexer interface.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
-#include "clang/Basic/IdentifierTable.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/TokenKinds.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Lex/PTHManager.h"
-#include "clang/Lex/Preprocessor.h"
-#include "clang/Lex/Token.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/DJB.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/OnDiskHashTable.h"
-#include <cassert>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-#include <ctime>
-#include <memory>
-#include <utility>
-
-using namespace clang;
-
-static const unsigned StoredTokenSize = 1 + 1 + 2 + 4 + 4;
-
-//===----------------------------------------------------------------------===//
-// PTHLexer methods.
-//===----------------------------------------------------------------------===//
-
-PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
- const unsigned char *ppcond, PTHManager &PM)
- : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), PPCond(ppcond),
- CurPPCondPtr(ppcond), PTHMgr(PM) {
- FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
-}
-
-bool PTHLexer::Lex(Token& Tok) {
- //===--------------------------------------==//
- // Read the raw token data.
- //===--------------------------------------==//
- using namespace llvm::support;
-
- // Shadow CurPtr into an automatic variable.
- const unsigned char *CurPtrShadow = CurPtr;
-
- // Read in the data for the token.
- unsigned Word0 = endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t IdentifierID =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
- uint32_t FileOffset =
- endian::readNext<uint32_t, little, aligned>(CurPtrShadow);
-
- tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
- Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
- uint32_t Len = Word0 >> 16;
-
- CurPtr = CurPtrShadow;
-
- //===--------------------------------------==//
- // Construct the token itself.
- //===--------------------------------------==//
-
- Tok.startToken();
- Tok.setKind(TKind);
- Tok.setFlag(TFlags);
- assert(!LexingRawMode);
- Tok.setLocation(FileStartLoc.getLocWithOffset(FileOffset));
- Tok.setLength(Len);
-
- // Handle identifiers.
- if (Tok.isLiteral()) {
- Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
- }
- else if (IdentifierID) {
- MIOpt.ReadToken();
- IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
-
- Tok.setIdentifierInfo(II);
-
- // Change the kind of this identifier to the appropriate token kind, e.g.
- // turning "for" into a keyword.
- Tok.setKind(II->getTokenID());
-
- if (II->isHandleIdentifierCase())
- return PP->HandleIdentifier(Tok);
-
- return true;
- }
-
- //===--------------------------------------==//
- // Process the token.
- //===--------------------------------------==//
- if (TKind == tok::eof) {
- // Save the end-of-file token.
- EofToken = Tok;
-
- assert(!ParsingPreprocessorDirective);
- assert(!LexingRawMode);
-
- return LexEndOfFile(Tok);
- }
-
- if (TKind == tok::hash && Tok.isAtStartOfLine()) {
- LastHashTokPtr = CurPtr - StoredTokenSize;
- assert(!LexingRawMode);
- PP->HandleDirective(Tok);
-
- return false;
- }
-
- if (TKind == tok::eod) {
- assert(ParsingPreprocessorDirective);
- ParsingPreprocessorDirective = false;
- return true;
- }
-
- MIOpt.ReadToken();
- return true;
-}
-
-bool PTHLexer::LexEndOfFile(Token &Result) {
- // If we hit the end of the file while parsing a preprocessor directive,
- // end the preprocessor directive first. The next token returned will
- // then be the end of file.
- if (ParsingPreprocessorDirective) {
- ParsingPreprocessorDirective = false; // Done parsing the "line".
- return true; // Have a token.
- }
-
- assert(!LexingRawMode);
-
- // If we are in a #if directive, emit an error.
- while (!ConditionalStack.empty()) {
- if (PP->getCodeCompletionFileLoc() != FileStartLoc)
- PP->Diag(ConditionalStack.back().IfLoc,
- diag::err_pp_unterminated_conditional);
- ConditionalStack.pop_back();
- }
-
- // Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result);
-}
-
-// FIXME: We can just grab the last token instead of storing a copy
-// into EofToken.
-void PTHLexer::getEOF(Token& Tok) {
- assert(EofToken.is(tok::eof));
- Tok = EofToken;
-}
-
-void PTHLexer::DiscardToEndOfLine() {
- assert(ParsingPreprocessorDirective && ParsingFilename == false &&
- "Must be in a preprocessing directive!");
-
- // We assume that if the preprocessor wishes to discard to the end of
- // the line that it also means to end the current preprocessor directive.
- ParsingPreprocessorDirective = false;
-
- // Skip tokens by only peeking at their token kind and the flags.
- // We don't need to actually reconstruct full tokens from the token buffer.
- // This saves some copies and it also reduces IdentifierInfo* lookup.
- const unsigned char* p = CurPtr;
- while (true) {
- // Read the token kind. Are we at the end of the file?
- tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
- if (x == tok::eof) break;
-
- // Read the token flags. Are we at the start of the next line?
- Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
- if (y & Token::StartOfLine) break;
-
- // Skip to the next token.
- p += StoredTokenSize;
- }
-
- CurPtr = p;
-}
-
-/// SkipBlock - Used by Preprocessor to skip the current conditional block.
-bool PTHLexer::SkipBlock() {
- using namespace llvm::support;
-
- assert(CurPPCondPtr && "No cached PP conditional information.");
- assert(LastHashTokPtr && "No known '#' token.");
-
- const unsigned char *HashEntryI = nullptr;
- uint32_t TableIdx;
-
- do {
- // Read the token offset from the side-table.
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Read the target table index from the side-table.
- TableIdx = endian::readNext<uint32_t, little, aligned>(CurPPCondPtr);
-
- // Compute the actual memory address of the '#' token data for this entry.
- HashEntryI = TokBuf + Offset;
-
- // Optimization: "Sibling jumping". #if...#else...#endif blocks can
- // contain nested blocks. In the side-table we can jump over these
- // nested blocks instead of doing a linear search if the next "sibling"
- // entry is not at a location greater than LastHashTokPtr.
- if (HashEntryI < LastHashTokPtr && TableIdx) {
- // In the side-table we are still at an entry for a '#' token that
- // is earlier than the last one we saw. Check if the location we would
- // stride gets us closer.
- const unsigned char* NextPPCondPtr =
- PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- // Read where we should jump to.
- const unsigned char *HashEntryJ =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- if (HashEntryJ <= LastHashTokPtr) {
- // Jump directly to the next entry in the side table.
- HashEntryI = HashEntryJ;
- TableIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
- }
- }
- }
- while (HashEntryI < LastHashTokPtr);
- assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
- assert(TableIdx && "No jumping from #endifs.");
-
- // Update our side-table iterator.
- const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
- assert(NextPPCondPtr >= CurPPCondPtr);
- CurPPCondPtr = NextPPCondPtr;
-
- // Read where we should jump to.
- HashEntryI =
- TokBuf + endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
- uint32_t NextIdx = endian::readNext<uint32_t, little, aligned>(NextPPCondPtr);
-
- // By construction NextIdx will be zero if this is a #endif. This is useful
- // to know to obviate lexing another token.
- bool isEndif = NextIdx == 0;
-
- // This case can occur when we see something like this:
- //
- // #if ...
- // /* a comment or nothing */
- // #elif
- //
- // If we are skipping the first #if block it will be the case that CurPtr
- // already points 'elif'. Just return.
-
- if (CurPtr > HashEntryI) {
- assert(CurPtr == HashEntryI + StoredTokenSize);
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif)
- CurPtr += StoredTokenSize * 2;
- else
- LastHashTokPtr = HashEntryI;
-
- return isEndif;
- }
-
- // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
- CurPtr = HashEntryI;
-
- // Update the location of the last observed '#'. This is useful if we
- // are skipping multiple blocks.
- LastHashTokPtr = CurPtr;
-
- // Skip the '#' token.
- assert(((tok::TokenKind)*CurPtr) == tok::hash);
- CurPtr += StoredTokenSize;
-
- // Did we reach a #endif? If so, go ahead and consume that token as well.
- if (isEndif) {
- CurPtr += StoredTokenSize * 2;
- }
-
- return isEndif;
-}
-
-SourceLocation PTHLexer::getSourceLocation() {
- // getSourceLocation is not on the hot path. It is used to get the location
- // of the next token when transitioning back to this lexer when done
- // handling a #included file. Just read the necessary data from the token
- // data buffer to construct the SourceLocation object.
- // NOTE: This is a virtual function; hence it is defined out-of-line.
- using namespace llvm::support;
-
- const unsigned char *OffsetPtr = CurPtr + (StoredTokenSize - 4);
- uint32_t Offset = endian::readNext<uint32_t, little, aligned>(OffsetPtr);
- return FileStartLoc.getLocWithOffset(Offset);
-}
-
-//===----------------------------------------------------------------------===//
-// PTH file lookup: map from strings to file data.
-//===----------------------------------------------------------------------===//
-
-/// PTHFileLookup - This internal data structure is used by the PTHManager
-/// to map from FileEntry objects managed by FileManager to offsets within
-/// the PTH file.
-namespace {
-
-class PTHFileData {
- const uint32_t TokenOff;
- const uint32_t PPCondOff;
-
-public:
- PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
- : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
-
- uint32_t getTokenOffset() const { return TokenOff; }
- uint32_t getPPCondOffset() const { return PPCondOff; }
-};
-
-class PTHFileLookupCommonTrait {
-public:
- using internal_key_type = std::pair<unsigned char, StringRef>;
- using hash_value_type = unsigned;
- using offset_type = unsigned;
-
- static hash_value_type ComputeHash(internal_key_type x) {
- return llvm::djbHash(x.second);
- }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned keyLen =
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d);
- unsigned dataLen = (unsigned) *(d++);
- return std::make_pair(keyLen, dataLen);
- }
-
- static internal_key_type ReadKey(const unsigned char* d, unsigned) {
- unsigned char k = *(d++); // Read the entry kind.
- return std::make_pair(k, (const char*) d);
- }
-};
-
-} // namespace
-
-class PTHManager::PTHFileLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = const FileEntry *;
- using data_type = PTHFileData;
-
- static internal_key_type GetInternalKey(const FileEntry* FE) {
- return std::make_pair((unsigned char) 0x1, FE->getName());
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- return a.first == b.first && a.second == b.second;
- }
-
- static PTHFileData ReadData(const internal_key_type& k,
- const unsigned char* d, unsigned) {
- using namespace llvm::support;
-
- assert(k.first == 0x1 && "Only file lookups can match!");
- uint32_t x = endian::readNext<uint32_t, little, unaligned>(d);
- uint32_t y = endian::readNext<uint32_t, little, unaligned>(d);
- return PTHFileData(x, y);
- }
-};
-
-class PTHManager::PTHStringLookupTrait {
-public:
- using data_type = uint32_t;
- using external_key_type = const std::pair<const char *, unsigned>;
- using internal_key_type = external_key_type;
- using hash_value_type = uint32_t;
- using offset_type = unsigned;
-
- static bool EqualKey(const internal_key_type& a,
- const internal_key_type& b) {
- return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
- : false;
- }
-
- static hash_value_type ComputeHash(const internal_key_type& a) {
- return llvm::djbHash(StringRef(a.first, a.second));
- }
-
- // This hopefully will just get inlined and removed by the optimizer.
- static const internal_key_type&
- GetInternalKey(const external_key_type& x) { return x; }
-
- static std::pair<unsigned, unsigned>
- ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- return std::make_pair(
- (unsigned)endian::readNext<uint16_t, little, unaligned>(d),
- sizeof(uint32_t));
- }
-
- static std::pair<const char*, unsigned>
- ReadKey(const unsigned char* d, unsigned n) {
- assert(n >= 2 && d[n-1] == '\0');
- return std::make_pair((const char*) d, n-1);
- }
-
- static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- using namespace llvm::support;
-
- return endian::readNext<uint32_t, little, unaligned>(d);
- }
-};
-
-//===----------------------------------------------------------------------===//
-// PTHManager methods.
-//===----------------------------------------------------------------------===//
-
-PTHManager::PTHManager(
- std::unique_ptr<const llvm::MemoryBuffer> buf,
- std::unique_ptr<PTHFileLookup> fileLookup, const unsigned char *idDataTable,
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> perIDCache,
- std::unique_ptr<PTHStringIdLookup> stringIdLookup, unsigned numIds,
- const unsigned char *spellingBase, const char *originalSourceFile)
- : Buf(std::move(buf)), PerIDCache(std::move(perIDCache)),
- FileLookup(std::move(fileLookup)), IdDataTable(idDataTable),
- StringIdLookup(std::move(stringIdLookup)), NumIds(numIds),
- SpellingBase(spellingBase), OriginalSourceFile(originalSourceFile) {}
-
-PTHManager::~PTHManager() = default;
-
-static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
- Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Msg;
-}
-
-PTHManager *PTHManager::Create(StringRef file, DiagnosticsEngine &Diags) {
- // Memory map the PTH file.
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileOrErr =
- llvm::MemoryBuffer::getFile(file);
-
- if (!FileOrErr) {
- // FIXME: Add ec.message() to this diag.
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
- std::unique_ptr<llvm::MemoryBuffer> File = std::move(FileOrErr.get());
-
- using namespace llvm::support;
-
- // Get the buffer ranges and check if there are at least three 32-bit
- // words at the end of the file.
- const unsigned char *BufBeg = (const unsigned char*)File->getBufferStart();
- const unsigned char *BufEnd = (const unsigned char*)File->getBufferEnd();
-
- // Check the prologue of the file.
- if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) ||
- memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Read the PTH version.
- const unsigned char *p = BufBeg + (sizeof("cfe-pth"));
- unsigned Version = endian::readNext<uint32_t, little, aligned>(p);
-
- if (Version < PTHManager::Version) {
- InvalidPTH(Diags,
- Version < PTHManager::Version
- ? "PTH file uses an older PTH format that is no longer supported"
- : "PTH file uses a newer PTH format that cannot be read");
- return nullptr;
- }
-
- // Compute the address of the index table at the end of the PTH file.
- const unsigned char *PrologueOffset = p;
-
- if (PrologueOffset >= BufEnd) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Construct the file lookup table. This will be used for mapping from
- // FileEntry*'s to cached tokens.
- const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
- const unsigned char *FileTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(FileTableOffset);
-
- if (!(FileTable > BufBeg && FileTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr; // FIXME: Proper error diagnostic?
- }
-
- std::unique_ptr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
-
- // Warn if the PTH file is empty. We still want to create a PTHManager
- // as the PTH could be used with -include-pth.
- if (FL->isEmpty())
- InvalidPTH(Diags, "PTH file contains no cached source data");
-
- // Get the location of the table mapping from persistent ids to the
- // data needed to reconstruct identifiers.
- const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
- const unsigned char *IData =
- BufBeg + endian::readNext<uint32_t, little, aligned>(IDTableOffset);
-
- if (!(IData >= BufBeg && IData < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the location of the hashtable mapping between strings and
- // persistent IDs.
- const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
- const unsigned char *StringIdTable =
- BufBeg + endian::readNext<uint32_t, little, aligned>(StringIdTableOffset);
- if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- std::unique_ptr<PTHStringIdLookup> SL(
- PTHStringIdLookup::Create(StringIdTable, BufBeg));
-
- // Get the location of the spelling cache.
- const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
- const unsigned char *spellingBase =
- BufBeg + endian::readNext<uint32_t, little, aligned>(spellingBaseOffset);
- if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
- Diags.Report(diag::err_invalid_pth_file) << file;
- return nullptr;
- }
-
- // Get the number of IdentifierInfos and pre-allocate the identifier cache.
- uint32_t NumIds = endian::readNext<uint32_t, little, aligned>(IData);
-
- // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
- // so that we in the best case only zero out memory once when the OS returns
- // us new pages.
- std::unique_ptr<IdentifierInfo *[], llvm::FreeDeleter> PerIDCache;
-
- if (NumIds) {
- PerIDCache.reset((IdentifierInfo **)calloc(NumIds, sizeof(PerIDCache[0])));
- if (!PerIDCache) {
- InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
- return nullptr;
- }
- }
-
- // Compute the address of the original source file.
- const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
- unsigned len =
- endian::readNext<uint16_t, little, unaligned>(originalSourceBase);
- if (!len) originalSourceBase = nullptr;
-
- // Create the new PTHManager.
- return new PTHManager(std::move(File), std::move(FL), IData,
- std::move(PerIDCache), std::move(SL), NumIds,
- spellingBase, (const char *)originalSourceBase);
-}
-
-IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
- using namespace llvm::support;
-
- // Look in the PTH file for the string data for the IdentifierInfo object.
- const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
- const unsigned char *IDData =
- (const unsigned char *)Buf->getBufferStart() +
- endian::readNext<uint32_t, little, aligned>(TableEntry);
- assert(IDData < (const unsigned char*)Buf->getBufferEnd());
-
- // Allocate the object.
- std::pair<IdentifierInfo,const unsigned char*> *Mem =
- Alloc.Allocate<std::pair<IdentifierInfo, const unsigned char *>>();
-
- Mem->second = IDData;
- assert(IDData[0] != '\0');
- IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
-
- // Store the new IdentifierInfo in the cache.
- PerIDCache[PersistentID] = II;
- assert(II->getNameStart() && II->getNameStart()[0] != '\0');
- return II;
-}
-
-IdentifierInfo* PTHManager::get(StringRef Name) {
- // Double check our assumption that the last character isn't '\0'.
- assert(Name.empty() || Name.back() != '\0');
- PTHStringIdLookup::iterator I =
- StringIdLookup->find(std::make_pair(Name.data(), Name.size()));
- if (I == StringIdLookup->end()) // No identifier found?
- return nullptr;
-
- // Match found. Return the identifier!
- assert(*I > 0);
- return GetIdentifierInfo(*I-1);
-}
-
-PTHLexer *PTHManager::CreateLexer(FileID FID) {
- const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
- if (!FE)
- return nullptr;
-
- using namespace llvm::support;
-
- // Lookup the FileEntry object in our file lookup data structure. It will
- // return a variant that indicates whether or not there is an offset within
- // the PTH file that contains cached tokens.
- PTHFileLookup::iterator I = FileLookup->find(FE);
-
- if (I == FileLookup->end()) // No tokens available?
- return nullptr;
-
- const PTHFileData& FileData = *I;
-
- const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
- // Compute the offset of the token data within the buffer.
- const unsigned char* data = BufStart + FileData.getTokenOffset();
-
- // Get the location of pp-conditional table.
- const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
- uint32_t Len = endian::readNext<uint32_t, little, aligned>(ppcond);
- if (Len == 0) ppcond = nullptr;
-
- assert(PP && "No preprocessor set yet!");
- return new PTHLexer(*PP, FID, data, ppcond, *this);
-}
-
-//===----------------------------------------------------------------------===//
-// 'stat' caching.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class PTHStatData {
-public:
- uint64_t Size;
- time_t ModTime;
- llvm::sys::fs::UniqueID UniqueID;
- const bool HasData = false;
- bool IsDirectory;
-
- PTHStatData() = default;
- PTHStatData(uint64_t Size, time_t ModTime, llvm::sys::fs::UniqueID UniqueID,
- bool IsDirectory)
- : Size(Size), ModTime(ModTime), UniqueID(UniqueID), HasData(true),
- IsDirectory(IsDirectory) {}
-};
-
-class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
-public:
- using external_key_type = StringRef; // const char*
- using data_type = PTHStatData;
-
- static internal_key_type GetInternalKey(StringRef path) {
- // The key 'kind' doesn't matter here because it is ignored in EqualKey.
- return std::make_pair((unsigned char) 0x0, path);
- }
-
- static bool EqualKey(internal_key_type a, internal_key_type b) {
- // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
- // just the paths.
- return a.second == b.second;
- }
-
- static data_type ReadData(const internal_key_type& k, const unsigned char* d,
- unsigned) {
- if (k.first /* File or Directory */) {
- bool IsDirectory = true;
- if (k.first == 0x1 /* File */) {
- IsDirectory = false;
- d += 4 * 2; // Skip the first 2 words.
- }
-
- using namespace llvm::support;
-
- uint64_t File = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Device = endian::readNext<uint64_t, little, unaligned>(d);
- llvm::sys::fs::UniqueID UniqueID(Device, File);
- time_t ModTime = endian::readNext<uint64_t, little, unaligned>(d);
- uint64_t Size = endian::readNext<uint64_t, little, unaligned>(d);
- return data_type(Size, ModTime, UniqueID, IsDirectory);
- }
-
- // Negative stat. Don't read anything.
- return data_type();
- }
-};
-
-} // namespace
-
-namespace clang {
-
-class PTHStatCache : public FileSystemStatCache {
- using CacheTy = llvm::OnDiskChainedHashTable<PTHStatLookupTrait>;
-
- CacheTy Cache;
-
-public:
- PTHStatCache(PTHManager::PTHFileLookup &FL)
- : Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
- FL.getBase()) {}
-
- LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
- std::unique_ptr<vfs::File> *F,
- vfs::FileSystem &FS) override {
- // Do the lookup for the file's data in the PTH file.
- CacheTy::iterator I = Cache.find(Path);
-
- // If we don't get a hit in the PTH file just forward to 'stat'.
- if (I == Cache.end())
- return statChained(Path, Data, isFile, F, FS);
-
- const PTHStatData &D = *I;
-
- if (!D.HasData)
- return CacheMissing;
-
- Data.Name = Path;
- Data.Size = D.Size;
- Data.ModTime = D.ModTime;
- Data.UniqueID = D.UniqueID;
- Data.IsDirectory = D.IsDirectory;
- Data.IsNamedPipe = false;
- Data.InPCH = true;
-
- return CacheExists;
- }
-};
-
-} // namespace clang
-
-std::unique_ptr<FileSystemStatCache> PTHManager::createStatCache() {
- return llvm::make_unique<PTHStatCache>(*FileLookup);
-}
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 37c0a23646c5..575935119f6f 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -31,7 +31,6 @@
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorLexer.h"
-#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
@@ -404,10 +403,7 @@ void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
void Preprocessor::HandlePragmaMark() {
assert(CurPPLexer && "No current lexer?");
- if (CurLexer)
- CurLexer->ReadToEndOfLine();
- else
- CurPTHLexer->DiscardToEndOfLine();
+ CurLexer->ReadToEndOfLine();
}
/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
@@ -810,12 +806,6 @@ void Preprocessor::HandlePragmaModuleBuild(Token &Tok) {
DiscardUntilEndOfDirective();
}
- if (CurPTHLexer) {
- // FIXME: Support this somehow?
- Diag(Loc, diag::err_pp_module_build_pth);
- return;
- }
-
CurLexer->LexingRawMode = true;
auto TryConsumeIdentifier = [&](StringRef Ident) -> bool {
@@ -876,6 +866,37 @@ void Preprocessor::HandlePragmaModuleBuild(Token &Tok) {
StringRef(Start, End - Start));
}
+void Preprocessor::HandlePragmaHdrstop(Token &Tok) {
+ Lex(Tok);
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::warn_pp_hdrstop_filename_ignored);
+
+ std::string FileName;
+ if (!LexStringLiteral(Tok, FileName, "pragma hdrstop", false))
+ return;
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected) << tok::r_paren;
+ return;
+ }
+ Lex(Tok);
+ }
+ if (Tok.isNot(tok::eod))
+ Diag(Tok.getLocation(), diag::ext_pp_extra_tokens_at_eol)
+ << "pragma hdrstop";
+
+ if (creatingPCHWithPragmaHdrStop() &&
+ SourceMgr.isInMainFile(Tok.getLocation())) {
+ assert(CurLexer && "no lexer for #pragma hdrstop processing");
+ Token &Result = Tok;
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer->cutOffLexing();
+ }
+ if (usingPCHWithPragmaHdrStop())
+ SkippingUntilPragmaHdrStop = false;
+}
+
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
@@ -1099,10 +1120,6 @@ struct PragmaDebugHandler : public PragmaHandler {
}
void HandleCaptured(Preprocessor &PP) {
- // Skip if emitting preprocessed output.
- if (PP.isPreprocessedOutput())
- return;
-
Token Tok;
PP.LexUnexpandedToken(Tok);
@@ -1220,6 +1237,15 @@ public:
}
};
+/// "\#pragma hdrstop [<header-name-string>]"
+struct PragmaHdrstopHandler : public PragmaHandler {
+ PragmaHdrstopHandler() : PragmaHandler("hdrstop") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) override {
+ PP.HandlePragmaHdrstop(DepToken);
+ }
+};
+
/// "\#pragma warning(...)". MSVC's diagnostics do not map cleanly to clang's
/// diagnostics, so we don't really implement this pragma. We parse it and
/// ignore it to avoid -Wunknown-pragma warnings.
@@ -1799,6 +1825,7 @@ void Preprocessor::RegisterBuiltinPragmas() {
if (LangOpts.MicrosoftExt) {
AddPragmaHandler(new PragmaWarningHandler());
AddPragmaHandler(new PragmaIncludeAliasHandler());
+ AddPragmaHandler(new PragmaHdrstopHandler());
}
// Pragmas added by plugins
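PragmaHdrstopHandler is only registered under Microsoft extensions (-fms-extensions), matching MSVC, and HandlePragmaHdrstop warns about and ignores any filename argument. A sketch of how a translation unit lays out around the pragma; the file split is illustrative:

    // pch_demo.cpp: when a PCH is created with the hdrstop option, lexing of
    // this file stops at the pragma, so only the prefix above it goes into
    // the PCH; when that PCH is later used, tokens up to and including the
    // pragma are skipped instead of being re-lexed.
    #include <string>
    #include <vector>

    #pragma hdrstop            // an optional ("name.pch") argument is ignored

    #include <iostream>        // compiled normally, not part of the PCH prefix

    int main() {
      std::vector<std::string> V{"pch", "hdrstop"};
      for (const auto &S : V)
        std::cout << S << '\n';
    }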
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index def47b2f1095..047a4caaca73 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -44,8 +44,6 @@
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
-#include "clang/Lex/PTHLexer.h"
-#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorLexer.h"
@@ -149,6 +147,10 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
Ident_AbnormalTermination = nullptr;
}
+ // If using a PCH where a #pragma hdrstop is expected, start skipping tokens.
+ if (usingPCHWithPragmaHdrStop())
+ SkippingUntilPragmaHdrStop = true;
+
// If using a PCH with a through header, start skipping tokens.
if (!this->PPOpts->PCHThroughHeader.empty() &&
!this->PPOpts->ImplicitPCHInclude.empty())
@@ -220,11 +222,6 @@ void Preprocessor::FinalizeForModelFile() {
PragmaHandlers = std::move(PragmaHandlersBackup);
}
-void Preprocessor::setPTHManager(PTHManager* pm) {
- PTH.reset(pm);
- FileMgr.addStatCache(PTH->createStatCache());
-}
-
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
<< getSpelling(Tok) << "'";
@@ -250,7 +247,7 @@ void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
}
void Preprocessor::DumpLocation(SourceLocation Loc) const {
- Loc.dump(SourceMgr);
+ Loc.print(llvm::errs(), SourceMgr);
}
void Preprocessor::DumpMacro(const MacroInfo &MI) const {
@@ -375,8 +372,6 @@ StringRef Preprocessor::getLastMacroWithSpelling(
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
CurLexerKind = CLK_Lexer;
- else if (CurPTHLexer)
- CurLexerKind = CLK_PTHLexer;
else if (CurTokenLexer)
CurLexerKind = CLK_TokenLexer;
else
@@ -441,6 +436,13 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
return false;
}
+void Preprocessor::CodeCompleteIncludedFile(llvm::StringRef Dir,
+ bool IsAngled) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteIncludedFile(Dir, IsAngled);
+ setCodeCompletionReached();
+}
+
void Preprocessor::CodeCompleteNaturalLanguage() {
if (CodeComplete)
CodeComplete->CodeCompleteNaturalLanguage();
@@ -576,8 +578,9 @@ void Preprocessor::EnterMainSourceFile() {
}
// Skip tokens from the Predefines and if needed the main file.
- if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader)
- SkipTokensUntilPCHThroughHeader();
+ if ((usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) ||
+ (usingPCHWithPragmaHdrStop() && SkippingUntilPragmaHdrStop))
+ SkipTokensWhileUsingPCH();
}
void Preprocessor::setPCHThroughHeaderFileID(FileID FID) {
@@ -602,12 +605,23 @@ bool Preprocessor::usingPCHWithThroughHeader() {
PCHThroughHeaderFileID.isValid();
}
-/// Skip tokens until after the #include of the through header.
-/// Tokens in the predefines file and the main file may be skipped. If the end
-/// of the predefines file is reached, skipping continues into the main file.
-/// If the end of the main file is reached, it's a fatal error.
-void Preprocessor::SkipTokensUntilPCHThroughHeader() {
+bool Preprocessor::creatingPCHWithPragmaHdrStop() {
+ return TUKind == TU_Prefix && PPOpts->PCHWithHdrStop;
+}
+
+bool Preprocessor::usingPCHWithPragmaHdrStop() {
+ return TUKind != TU_Prefix && PPOpts->PCHWithHdrStop;
+}
+
+/// Skip tokens until after the #include of the through header or
+/// until after a #pragma hdrstop is seen. Tokens in the predefines file
+/// and the main file may be skipped. If the end of the predefines file
+/// is reached, skipping continues into the main file. If the end of the
+/// main file is reached, it's a fatal error.
+void Preprocessor::SkipTokensWhileUsingPCH() {
bool ReachedMainFileEOF = false;
+ bool UsingPCHThroughHeader = SkippingUntilPCHThroughHeader;
+ bool UsingPragmaHdrStop = SkippingUntilPragmaHdrStop;
Token Tok;
while (true) {
bool InPredefines = (CurLexer->getFileID() == getPredefinesFileID());
@@ -616,12 +630,18 @@ void Preprocessor::SkipTokensUntilPCHThroughHeader() {
ReachedMainFileEOF = true;
break;
}
- if (!SkippingUntilPCHThroughHeader)
+ if (UsingPCHThroughHeader && !SkippingUntilPCHThroughHeader)
break;
+ if (UsingPragmaHdrStop && !SkippingUntilPragmaHdrStop)
+ break;
+ }
+ if (ReachedMainFileEOF) {
+ if (UsingPCHThroughHeader)
+ Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
+ << PPOpts->PCHThroughHeader << 1;
+ else if (!PPOpts->PCHWithHdrStopCreate)
+ Diag(SourceLocation(), diag::err_pp_pragma_hdrstop_not_seen);
}
- if (ReachedMainFileEOF)
- Diag(SourceLocation(), diag::err_pp_through_header_not_seen)
- << PPOpts->PCHThroughHeader << 1;
}
void Preprocessor::replayPreambleConditionalStack() {
@@ -848,9 +868,6 @@ void Preprocessor::Lex(Token &Result) {
case CLK_Lexer:
ReturnedToken = CurLexer->Lex(Result);
break;
- case CLK_PTHLexer:
- ReturnedToken = CurPTHLexer->Lex(Result);
- break;
case CLK_TokenLexer:
ReturnedToken = CurTokenLexer->Lex(Result);
break;
@@ -868,6 +885,7 @@ void Preprocessor::Lex(Token &Result) {
if (Result.is(tok::code_completion) && Result.getIdentifierInfo()) {
// Remember the identifier before code completion token.
setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
+ setCodeCompletionTokenRange(Result.getLocation(), Result.getEndLoc());
// Set IdenfitierInfo to null to avoid confusing code that handles both
// identifiers and completion tokens.
Result.setIdentifierInfo(nullptr);
@@ -913,7 +931,7 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
// If we have a non-empty module path, load the named module.
if (!ModuleImportPath.empty()) {
// Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarachy separator. Flatten such module names now.
+ // a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
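SkipTokensWhileUsingPCH above folds the existing through-header skipping and the new hdrstop skipping into one loop, with separate end-of-main-file diagnostics for each mode. A sketch of the through-header case, with <vector> standing in for the designated through header:

    // main.cpp: while using a PCH built through <vector>, tokens are skipped
    // until after this #include; reaching the end of the main file without
    // seeing it triggers err_pp_through_header_not_seen (the hdrstop mode has
    // its own diagnostic).
    #include <cstdio>          // part of the skipped PCH prefix
    #include <vector>          // the assumed through header; skipping ends here

    int main() {
      std::vector<int> V{1, 2, 3};
      std::printf("%zu\n", V.size());
      return 0;
    }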
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
index ec73479cb54f..f810c28ccdf1 100644
--- a/lib/Lex/TokenConcatenation.cpp
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -67,7 +67,7 @@ bool TokenConcatenation::IsIdentifierStringPrefix(const Token &Tok) const {
return IsStringPrefix(StringRef(PP.getSpelling(Tok)), LangOpts.CPlusPlus11);
}
-TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
+TokenConcatenation::TokenConcatenation(const Preprocessor &pp) : PP(pp) {
memset(TokenInfo, 0, sizeof(TokenInfo));
// These tokens have custom code in AvoidConcat.
@@ -126,7 +126,7 @@ TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
/// GetFirstChar - Get the first character of the token \arg Tok,
/// avoiding calls to getSpelling where possible.
-static char GetFirstChar(Preprocessor &PP, const Token &Tok) {
+static char GetFirstChar(const Preprocessor &PP, const Token &Tok) {
if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
// Avoid spelling identifiers, the most common form of token.
return II->getNameStart()[0];
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index e71b5a9d1e6d..f7703b1bfd8a 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -141,26 +141,26 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
CleanupParser(ParseOP.get());
S.getPreprocessor().EnterMainSourceFile();
- if (!S.getPreprocessor().getCurrentLexer()) {
- // If a PCH through header is specified that does not have an include in
- // the source, there won't be any tokens or a Lexer.
- return;
- }
-
- P.Initialize();
-
- Parser::DeclGroupPtrTy ADecl;
ExternalASTSource *External = S.getASTContext().getExternalSource();
if (External)
External->StartTranslationUnit(Consumer);
- for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
- AtEOF = P.ParseTopLevelDecl(ADecl)) {
- // If we got a null return and something *was* parsed, ignore it. This
- // is due to a top-level semicolon, an action override, or a parse error
- // skipping something.
- if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
- return;
+ // If a PCH through header is specified that does not have an include in
+ // the source, or a PCH is being created with #pragma hdrstop with nothing
+ // after the pragma, there won't be any tokens or a Lexer.
+ bool HaveLexer = S.getPreprocessor().getCurrentLexer();
+
+ if (HaveLexer) {
+ P.Initialize();
+ Parser::DeclGroupPtrTy ADecl;
+ for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
+ AtEOF = P.ParseTopLevelDecl(ADecl)) {
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
+ return;
+ }
}
// Process any TopLevelDecls generated by #pragma weak.
@@ -179,7 +179,7 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
std::swap(OldCollectStats, S.CollectStats);
if (PrintStats) {
llvm::errs() << "\nSTATISTICS:\n";
- P.getActions().PrintStats();
+ if (HaveLexer) P.getActions().PrintStats();
S.getASTContext().PrintStats();
Decl::PrintStats();
Stmt::PrintStats();
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 5898120cab46..fde3ce00f830 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -595,7 +595,7 @@ void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
// to X" within the optional brace-or-equal-initializer. It shall not
// appear elsewhere in the member-declarator.
Sema::CXXThisScopeRAII ThisScope(Actions, Class.TagOrTemplate,
- /*TypeQuals=*/(unsigned)0);
+ Qualifiers());
for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
@@ -728,7 +728,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
case tok::semi:
if (StopAtSemi)
return false;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
// consume this token.
Toks.push_back(Tok);
@@ -1108,13 +1108,13 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::greatergreater:
if (!getLangOpts().CPlusPlus11)
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::greater:
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
@@ -1219,7 +1219,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case tok::semi:
if (CIK == CIK_DefaultInitializer)
return true; // End of the default initializer.
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
consume_token:
Toks.push_back(Tok);
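The "FALL THROUGH" comments above become LLVM_FALLTHROUGH, which expands to a fallthrough attribute where the compiler provides one, so -Wimplicit-fallthrough stays quiet without relying on comment parsing. A minimal illustration:

    #include "llvm/Support/Compiler.h"

    // Returns 1 for horizontal whitespace, 0 otherwise; the annotation marks
    // the intentional fall-through from '\t' to ' '.
    int classifyWhitespace(int C) {
      switch (C) {
      case '\t':
        LLVM_FALLTHROUGH;
      case ' ':
        return 1;
      default:
        return 0;
      }
    }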
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 6e57c7bbba91..298a2bad56c6 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -24,7 +24,6 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
@@ -754,7 +753,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
case tok::kw__Null_unspecified: {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
Diag(AttrNameLoc, diag::ext_nullability)
<< AttrName;
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
@@ -1001,6 +1000,21 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
continue;
}
+ if (Keyword == Ident_deprecated && Platform->Ident &&
+ Platform->Ident->isStr("swift")) {
+ // For swift, we deprecate for all versions.
+ if (Changes[Deprecated].KeywordLoc.isValid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword
+ << SourceRange(Changes[Deprecated].KeywordLoc);
+ }
+
+ Changes[Deprecated].KeywordLoc = KeywordLoc;
+ // Use a fake version here.
+ Changes[Deprecated].Version = VersionTuple(1);
+ continue;
+ }
+
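With the change above, the availability attribute accepts a bare 'deprecated' (no '=version') when the platform is swift, marking the declaration deprecated for all versions. A sketch of the accepted spelling; the declaration name and message are illustrative:

    __attribute__((availability(swift, deprecated,
                                 message="prefer the Swift replacement")))
    void legacyEntryPoint(void);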
if (Tok.isNot(tok::equal)) {
Diag(Tok, diag::err_expected_after) << Keyword << tok::equal;
SkipUntil(tok::r_paren, StopAtSemi);
@@ -1412,7 +1426,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
RecordDecl *RD = dyn_cast_or_null<RecordDecl>(D->getDeclContext());
// Allow 'this' within late-parsed attributes.
- Sema::CXXThisScopeRAII ThisScope(Actions, RD, /*TypeQuals=*/0,
+ Sema::CXXThisScopeRAII ThisScope(Actions, RD, Qualifiers(),
ND && ND->isCXXInstanceMember());
if (LA.Decls.size() == 1) {
@@ -1940,7 +1954,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
Diag(Loc, diag::err_c11_noreturn_misplaced)
<< (Fixit ? FixItHint::CreateRemoval(Loc) : FixItHint())
- << (Fixit ? FixItHint::CreateInsertion(D.getLocStart(), "_Noreturn ")
+ << (Fixit ? FixItHint::CreateInsertion(D.getBeginLoc(), "_Noreturn ")
: FixItHint());
}
}
@@ -2302,20 +2316,28 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
llvm::function_ref<void()> ExprListCompleter;
auto ThisVarDecl = dyn_cast_or_null<VarDecl>(ThisDecl);
auto ConstructorCompleter = [&, ThisVarDecl] {
- Actions.CodeCompleteConstructor(
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs);
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
};
if (ThisVarDecl) {
// ParseExpressionList can sometimes succeed even when ThisDecl is not
// VarDecl. This is an error and it is reported in a call to
// Actions.ActOnInitializerError(). However, we call
- // CodeCompleteConstructor only on VarDecls, falling back to default
- // completer in other cases.
+ // ProduceConstructorSignatureHelp only on VarDecls, falling back to
+ // default completer in other cases.
ExprListCompleter = ConstructorCompleter;
}
if (ParseExpressionList(Exprs, CommaLocs, ExprListCompleter)) {
+ if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ CalledSignatureHelp = true;
+ }
Actions.ActOnInitializerError(ThisDecl);
SkipUntil(tok::r_paren, StopAtSemi);
} else {
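
The intent, roughly, is that code completion inside a parenthesized variable initializer now runs constructor signature help and then falls back to ordinary expression completion with the preferred parameter type; the extra call after a failed ParseExpressionList keeps that working when completion is reached mid-argument. An invented user-side scenario:

    struct Widget {
      Widget(int Id, const char *Name);
      Widget(int Id, int Flags);
    };

    // Requesting completion while typing the second argument now surfaces both
    // Widget constructor signatures and prefers expressions convertible to the
    // corresponding parameter type.
    Widget W(42, 7);
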
@@ -2841,7 +2863,7 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
return false;
const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
- Diag(PP.getLocForEndOfToken(DS.getRepAsDecl()->getLocEnd()),
+ Diag(PP.getLocForEndOfToken(DS.getRepAsDecl()->getEndLoc()),
diag::err_expected_after)
<< DeclSpec::getSpecifierName(DS.getTypeSpecType(), PPol) << tok::semi;
@@ -3282,7 +3304,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C supports type arguments and protocol references
// following an Objective-C object or object pointer
// type. Handle either one of them.
- if (Tok.is(tok::less) && getLangOpts().ObjC1) {
+ if (Tok.is(tok::less) && getLangOpts().ObjC) {
SourceLocation NewEndLoc;
TypeResult NewTypeRep = parseObjCTypeArgsAndProtocolQualifiers(
Loc, TypeRep, /*consumeLastToken=*/true,
@@ -3804,7 +3826,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// GCC ObjC supports types like "<SomeProtocol>" as a synonym for
// "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
// but we support it.
- if (DS.hasTypeSpecifier() || !getLangOpts().ObjC1)
+ if (DS.hasTypeSpecifier() || !getLangOpts().ObjC)
goto DoneWithDeclSpec;
SourceLocation StartLoc = Tok.getLocation();
@@ -3830,7 +3852,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
assert(PrevSpec && "Method did not return previous specifier!");
assert(DiagID);
- if (DiagID == diag::ext_duplicate_declspec)
+ if (DiagID == diag::ext_duplicate_declspec ||
+ DiagID == diag::ext_warn_duplicate_declspec)
Diag(Tok, DiagID)
<< PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
else if (DiagID == diag::err_opencl_unknown_type_specifier) {
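
The widened check lets ext_warn_duplicate_declspec take the same removal fix-it path as the existing extension diagnostic. A hedged example of code that draws a duplicate-specifier diagnostic with a removal fix-it (the exact wording and warning group depend on the language mode):

    const const int Value = 0;   // duplicate 'const' declaration specifier; fix-it removes the second one
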
@@ -4145,15 +4168,11 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Enum definitions should not be parsed in a trailing-return-type.
bool AllowDeclaration = DSC != DeclSpecContext::DSC_trailing;
- bool AllowFixedUnderlyingType = AllowDeclaration &&
- (getLangOpts().CPlusPlus11 || getLangOpts().MicrosoftExt ||
- getLangOpts().ObjC2);
-
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLangOpts().CPlusPlus) {
// "enum foo : bar;" is not a potential typo for "enum foo::bar;"
// if a fixed underlying type is allowed.
- ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
+ ColonProtectionRAIIObject X(*this, AllowDeclaration);
CXXScopeSpec Spec;
if (ParseOptionalCXXScopeSpecifier(Spec, nullptr,
@@ -4175,7 +4194,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Must have either 'enum name' or 'enum {...}'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
- !(AllowFixedUnderlyingType && Tok.is(tok::colon))) {
+ !(AllowDeclaration && Tok.is(tok::colon))) {
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
// Skip the rest of this declarator, up until the comma or semicolon.
@@ -4208,7 +4227,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Parse the fixed underlying type.
bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
- if (AllowFixedUnderlyingType && Tok.is(tok::colon)) {
+ if (AllowDeclaration && Tok.is(tok::colon)) {
bool PossibleBitfield = false;
if (CanBeBitfield) {
// If we're in class scope, this can either be an enum declaration with
@@ -4268,13 +4287,15 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SourceRange Range;
BaseType = ParseTypeName(&Range);
- if (getLangOpts().CPlusPlus11) {
- Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
- } else if (!getLangOpts().ObjC2) {
- if (getLangOpts().CPlusPlus)
- Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type) << Range;
+ if (!getLangOpts().ObjC) {
+ if (getLangOpts().CPlusPlus11)
+ Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ else if (getLangOpts().CPlusPlus)
+ Diag(StartLoc, diag::ext_cxx11_enum_fixed_underlying_type);
+ else if (getLangOpts().MicrosoftExt)
+ Diag(StartLoc, diag::ext_ms_c_enum_fixed_underlying_type);
else
- Diag(StartLoc, diag::ext_c_enum_fixed_underlying_type) << Range;
+ Diag(StartLoc, diag::ext_clang_c_enum_fixed_underlying_type);
}
}
}
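
With the restructured diagnostics, a fixed underlying type on an enum is reported as a C++98-compatibility warning in C++11, an extension in earlier C++, a Microsoft extension in C under -fms-extensions, and otherwise a Clang extension in plain C, instead of being gated on Objective-C. For example:

    enum Color : unsigned char { Red, Green, Blue };
    // C++11: warn_cxx98_compat_enum_fixed_underlying_type
    // pre-C++11 C++: ext_cxx11_enum_fixed_underlying_type
    // C with -fms-extensions: ext_ms_c_enum_fixed_underlying_type
    // plain C: ext_clang_c_enum_fixed_underlying_type
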
@@ -4654,7 +4675,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::identifier: // foo::bar
if (TryAltiVecVectorToken())
return true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
@@ -4733,7 +4754,7 @@ bool Parser::isTypeSpecifierQualifier() {
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLangOpts().ObjC1;
+ return getLangOpts().ObjC;
case tok::kw___cdecl:
case tok::kw___stdcall:
@@ -4784,11 +4805,11 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
- if (getLangOpts().ObjC1 && NextToken().is(tok::period))
+ if (getLangOpts().ObjC && NextToken().is(tok::period))
return false;
if (TryAltiVecVectorToken())
return true;
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_decltype: // decltype(T())::type
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
@@ -4914,7 +4935,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
- return getLangOpts().ObjC1;
+ return getLangOpts().ObjC;
// typedef-name
case tok::annot_typename:
@@ -5363,7 +5384,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
D.AddTypeInfo(DeclaratorChunk::getMemberPointer(
- SS, DS.getTypeQualifiers(), DS.getLocEnd()),
+ SS, DS.getTypeQualifiers(), DS.getEndLoc()),
std::move(DS.getAttributes()),
/* Don't replace range end. */ SourceLocation());
return;
@@ -5755,7 +5776,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.getContext() == DeclaratorContext::MemberContext) {
// Objective-C++: Detect C++ keywords and try to prevent further errors by
// treating these keyword as valid member names.
- if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus &&
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->isCPlusPlusKeyword(getLangOpts())) {
Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
@@ -6051,9 +6072,6 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
DeclSpec DS(AttrFactory);
bool RefQualifierIsLValueRef = true;
SourceLocation RefQualifierLoc;
- SourceLocation ConstQualifierLoc;
- SourceLocation VolatileQualifierLoc;
- SourceLocation RestrictQualifierLoc;
ExceptionSpecificationType ESpecType = EST_None;
SourceRange ESpecRange;
SmallVector<ParsedType, 2> DynamicExceptions;
@@ -6116,9 +6134,6 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
}));
if (!DS.getSourceRange().getEnd().isInvalid()) {
EndLoc = DS.getSourceRange().getEnd();
- ConstQualifierLoc = DS.getConstSpecLoc();
- VolatileQualifierLoc = DS.getVolatileSpecLoc();
- RestrictQualifierLoc = DS.getRestrictSpecLoc();
}
// Parse ref-qualifier[opt].
@@ -6140,13 +6155,14 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
: D.getContext() == DeclaratorContext::FileContext &&
D.getCXXScopeSpec().isValid() &&
Actions.CurContext->isRecord());
- Sema::CXXThisScopeRAII ThisScope(Actions,
- dyn_cast<CXXRecordDecl>(Actions.CurContext),
- DS.getTypeQualifiers() |
- (D.getDeclSpec().isConstexprSpecified() &&
- !getLangOpts().CPlusPlus14
- ? Qualifiers::Const : 0),
- IsCXX11MemberFunction);
+
+ Qualifiers Q = Qualifiers::fromCVRUMask(DS.getTypeQualifiers());
+ if (D.getDeclSpec().isConstexprSpecified() && !getLangOpts().CPlusPlus14)
+ Q.addConst();
+
+ Sema::CXXThisScopeRAII ThisScope(
+ Actions, dyn_cast<CXXRecordDecl>(Actions.CurContext), Q,
+ IsCXX11MemberFunction);
// Parse exception-specification[opt].
bool Delayed = D.isFirstDeclarationOfMember() &&
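
The Qualifiers-based CXXThisScopeRAII preserves the pre-C++14 rule that a constexpr member function is implicitly const, so 'this' is const-qualified while the rest of the declarator is parsed. For instance:

    struct Counter {
      int N;
      // In C++11 a constexpr member function is implicitly const, so while the
      // trailing parts of this declaration are parsed 'this' has type
      // 'const Counter *'; from C++14 on it is non-const unless written so.
      constexpr int get() { return N; }
    };
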
@@ -6217,15 +6233,13 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
D.AddTypeInfo(DeclaratorChunk::getFunction(
HasProto, IsAmbiguous, LParenLoc, ParamInfo.data(),
ParamInfo.size(), EllipsisLoc, RParenLoc,
- DS.getTypeQualifiers(), RefQualifierIsLValueRef,
- RefQualifierLoc, ConstQualifierLoc, VolatileQualifierLoc,
- RestrictQualifierLoc,
- /*MutableLoc=*/SourceLocation(), ESpecType, ESpecRange,
- DynamicExceptions.data(), DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
+ RefQualifierIsLValueRef, RefQualifierLoc,
+ /*MutableLoc=*/SourceLocation(),
+ ESpecType, ESpecRange, DynamicExceptions.data(),
+ DynamicExceptionRanges.data(), DynamicExceptions.size(),
NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
ExceptionSpecTokens, DeclsInPrototype, StartLoc,
- LocalEndLoc, D, TrailingReturnType),
+ LocalEndLoc, D, TrailingReturnType, &DS),
std::move(FnAttrs), EndLoc);
}
@@ -6693,7 +6707,7 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
if (NeedParens) {
// Create a DeclaratorChunk for the inserted parens.
- SourceLocation EndLoc = PP.getLocForEndOfToken(D.getLocEnd());
+ SourceLocation EndLoc = PP.getLocForEndOfToken(D.getEndLoc());
D.AddTypeInfo(DeclaratorChunk::getParen(SuggestParenLoc, EndLoc),
SourceLocation());
}
@@ -6709,11 +6723,11 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
if (!D.getIdentifier() && !NeedParens)
return;
- SourceLocation EndBracketLoc = TempDeclarator.getLocEnd();
+ SourceLocation EndBracketLoc = TempDeclarator.getEndLoc();
// Generate the move bracket error message.
SourceRange BracketRange(StartBracketLoc, EndBracketLoc);
- SourceLocation EndLoc = PP.getLocForEndOfToken(D.getLocEnd());
+ SourceLocation EndLoc = PP.getLocForEndOfToken(D.getEndLoc());
if (NeedParens) {
Diag(EndLoc, diag::err_brackets_go_after_unqualified_id)
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 9ba44d07aba8..f8359f1e87d8 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -24,7 +24,6 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
@@ -33,24 +32,25 @@ using namespace clang;
/// may either be a top level namespace or a block-level namespace alias. If
/// there was an inline keyword, it has already been parsed.
///
-/// namespace-definition: [C++ 7.3: basic.namespace]
+/// namespace-definition: [C++: namespace.def]
/// named-namespace-definition
/// unnamed-namespace-definition
+/// nested-namespace-definition
+///
+/// named-namespace-definition:
+/// 'inline'[opt] 'namespace' attributes[opt] identifier '{'
+/// namespace-body '}'
///
/// unnamed-namespace-definition:
/// 'inline'[opt] 'namespace' attributes[opt] '{' namespace-body '}'
///
-/// named-namespace-definition:
-/// original-namespace-definition
-/// extension-namespace-definition
-///
-/// original-namespace-definition:
-/// 'inline'[opt] 'namespace' identifier attributes[opt]
-/// '{' namespace-body '}'
+/// nested-namespace-definition:
+/// 'namespace' enclosing-namespace-specifier '::' 'inline'[opt]
+/// identifier '{' namespace-body '}'
///
-/// extension-namespace-definition:
-/// 'inline'[opt] 'namespace' original-namespace-name
-/// '{' namespace-body '}'
+/// enclosing-namespace-specifier:
+/// identifier
+/// enclosing-namespace-specifier '::' 'inline'[opt] identifier
///
/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
/// 'namespace' identifier '=' qualified-namespace-specifier ';'
@@ -70,9 +70,8 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SourceLocation IdentLoc;
IdentifierInfo *Ident = nullptr;
- std::vector<SourceLocation> ExtraIdentLoc;
- std::vector<IdentifierInfo*> ExtraIdent;
- std::vector<SourceLocation> ExtraNamespaceLoc;
+ InnerNamespaceInfoList ExtraNSs;
+ SourceLocation FirstNestedInlineLoc;
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation attrLoc;
@@ -88,15 +87,29 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
IdentLoc = ConsumeToken(); // eat the identifier.
- while (Tok.is(tok::coloncolon) && NextToken().is(tok::identifier)) {
- ExtraNamespaceLoc.push_back(ConsumeToken());
- ExtraIdent.push_back(Tok.getIdentifierInfo());
- ExtraIdentLoc.push_back(ConsumeToken());
+ while (Tok.is(tok::coloncolon) &&
+ (NextToken().is(tok::identifier) ||
+ (NextToken().is(tok::kw_inline) &&
+ GetLookAheadToken(2).is(tok::identifier)))) {
+
+ InnerNamespaceInfo Info;
+ Info.NamespaceLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_inline)) {
+ Info.InlineLoc = ConsumeToken();
+ if (FirstNestedInlineLoc.isInvalid())
+ FirstNestedInlineLoc = Info.InlineLoc;
+ }
+
+ Info.Ident = Tok.getIdentifierInfo();
+ Info.IdentLoc = ConsumeToken();
+
+ ExtraNSs.push_back(Info);
}
}
// A nested namespace definition cannot have attributes.
- if (!ExtraNamespaceLoc.empty() && attrLoc.isValid())
+ if (!ExtraNSs.empty() && attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
// Read label attributes, if present.
@@ -138,13 +151,21 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
return nullptr;
}
- if (ExtraIdent.empty()) {
+ if (ExtraNSs.empty()) {
// Normal namespace definition, not a nested-namespace-definition.
} else if (InlineLoc.isValid()) {
Diag(InlineLoc, diag::err_inline_nested_namespace_definition);
+ } else if (getLangOpts().CPlusPlus2a) {
+ Diag(ExtraNSs[0].NamespaceLoc,
+ diag::warn_cxx14_compat_nested_namespace_definition);
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc,
+ diag::warn_cxx17_compat_inline_nested_namespace_definition);
} else if (getLangOpts().CPlusPlus17) {
- Diag(ExtraNamespaceLoc[0],
+ Diag(ExtraNSs[0].NamespaceLoc,
diag::warn_cxx14_compat_nested_namespace_definition);
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc, diag::ext_inline_nested_namespace_definition);
} else {
TentativeParsingAction TPA(*this);
SkipUntil(tok::r_brace, StopBeforeMatch);
@@ -152,26 +173,34 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
TPA.Revert();
if (!rBraceToken.is(tok::r_brace)) {
- Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
- << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
+ << SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc);
} else {
std::string NamespaceFix;
- for (std::vector<IdentifierInfo*>::iterator I = ExtraIdent.begin(),
- E = ExtraIdent.end(); I != E; ++I) {
- NamespaceFix += " { namespace ";
- NamespaceFix += (*I)->getName();
+ for (const auto &ExtraNS : ExtraNSs) {
+ NamespaceFix += " { ";
+ if (ExtraNS.InlineLoc.isValid())
+ NamespaceFix += "inline ";
+ NamespaceFix += "namespace ";
+ NamespaceFix += ExtraNS.Ident->getName();
}
std::string RBraces;
- for (unsigned i = 0, e = ExtraIdent.size(); i != e; ++i)
+ for (unsigned i = 0, e = ExtraNSs.size(); i != e; ++i)
RBraces += "} ";
- Diag(ExtraNamespaceLoc[0], diag::ext_nested_namespace_definition)
- << FixItHint::CreateReplacement(SourceRange(ExtraNamespaceLoc.front(),
- ExtraIdentLoc.back()),
- NamespaceFix)
+ Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
+ << FixItHint::CreateReplacement(
+ SourceRange(ExtraNSs.front().NamespaceLoc,
+ ExtraNSs.back().IdentLoc),
+ NamespaceFix)
<< FixItHint::CreateInsertion(rBraceToken.getLocation(), RBraces);
}
+
+ // Warn about nested inline namespaces.
+ if (FirstNestedInlineLoc.isValid())
+ Diag(FirstNestedInlineLoc, diag::ext_inline_nested_namespace_definition);
}
// If we're still good, complain about inline namespaces in non-C++0x now.
@@ -192,8 +221,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// Parse the contents of the namespace. This includes parsing recovery on
// any improperly nested namespaces.
- ParseInnerNamespace(ExtraIdentLoc, ExtraIdent, ExtraNamespaceLoc, 0,
- InlineLoc, attrs, T);
+ ParseInnerNamespace(ExtraNSs, 0, InlineLoc, attrs, T);
// Leave the namespace scope.
NamespaceScope.Exit();
@@ -206,13 +234,11 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
}
/// ParseInnerNamespace - Parse the contents of a namespace.
-void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
- std::vector<IdentifierInfo *> &Ident,
- std::vector<SourceLocation> &NamespaceLoc,
+void Parser::ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker) {
- if (index == Ident.size()) {
+ if (index == InnerNSs.size()) {
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
ParsedAttributesWithRange attrs(AttrFactory);
@@ -233,14 +259,13 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
ParseScope NamespaceScope(this, Scope::DeclScope);
UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
- getCurScope(), SourceLocation(), NamespaceLoc[index], IdentLoc[index],
- Ident[index], Tracker.getOpenLocation(), attrs,
- ImplicitUsingDirectiveDecl);
+ getCurScope(), InnerNSs[index].InlineLoc, InnerNSs[index].NamespaceLoc,
+ InnerNSs[index].IdentLoc, InnerNSs[index].Ident,
+ Tracker.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl);
assert(!ImplicitUsingDirectiveDecl &&
"nested namespace definition cannot define anonymous namespace");
- ParseInnerNamespace(IdentLoc, Ident, NamespaceLoc, ++index, InlineLoc,
- attrs, Tracker);
+ ParseInnerNamespace(InnerNSs, ++index, InlineLoc, attrs, Tracker);
NamespaceScope.Exit();
Actions.ActOnFinishNamespaceDef(NamespcDecl, Tracker.getCloseLocation());
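
Taken together, these hunks teach the parser the nested-namespace-definition with 'inline' components and extend the pre-C++17 fix-it to spell out the expansion. For reference, the shorthand and how it resolves (the inline form is C++20):

    // Shorthand for: namespace A { inline namespace B { namespace C { ... } } }
    namespace A::inline B::C {
      int X = 1;
    }

    int Y = A::C::X;   // C (and X) are reachable through the inline namespace B
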
@@ -365,7 +390,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
case tok::r_brace:
if (!NestedModules)
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
default:
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
@@ -2321,32 +2346,22 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
if (D.isFunctionDeclarator()) {
auto &Function = D.getFunctionTypeInfo();
if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
- auto DeclSpecCheck = [&] (DeclSpec::TQ TypeQual,
- const char *FixItName,
- SourceLocation SpecLoc,
- unsigned* QualifierLoc) {
+ auto DeclSpecCheck = [&](DeclSpec::TQ TypeQual, StringRef FixItName,
+ SourceLocation SpecLoc) {
FixItHint Insertion;
- if (DS.getTypeQualifiers() & TypeQual) {
- if (!(Function.TypeQuals & TypeQual)) {
- std::string Name(FixItName);
- Name += " ";
- Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
- Function.TypeQuals |= TypeQual;
- *QualifierLoc = SpecLoc.getRawEncoding();
- }
- Diag(SpecLoc, diag::err_declspec_after_virtspec)
+ auto &MQ = Function.getOrCreateMethodQualifiers();
+ if (!(MQ.getTypeQualifiers() & TypeQual)) {
+ std::string Name(FixItName.data());
+ Name += " ";
+ Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
+ MQ.SetTypeQual(TypeQual, SpecLoc);
+ }
+ Diag(SpecLoc, diag::err_declspec_after_virtspec)
<< FixItName
<< VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
- << FixItHint::CreateRemoval(SpecLoc)
- << Insertion;
- }
+ << FixItHint::CreateRemoval(SpecLoc) << Insertion;
};
- DeclSpecCheck(DeclSpec::TQ_const, "const", DS.getConstSpecLoc(),
- &Function.ConstQualifierLoc);
- DeclSpecCheck(DeclSpec::TQ_volatile, "volatile", DS.getVolatileSpecLoc(),
- &Function.VolatileQualifierLoc);
- DeclSpecCheck(DeclSpec::TQ_restrict, "restrict", DS.getRestrictSpecLoc(),
- &Function.RestrictQualifierLoc);
+ DS.forEachQualifier(DeclSpecCheck);
}
// Parse ref-qualifiers.
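
DS.forEachQualifier now drives one diagnostic path for every qualifier written after a virt-specifier, recording the location in the declarator's shared method qualifiers instead of per-qualifier fields. The situation being diagnosed, sketched with well-formed code and a comment:

    struct Base {
      virtual void f() const;
    };
    struct Derived : Base {
      void f() const override;   // correct order
      // Writing 'void f() override const;' instead triggers
      // err_declspec_after_virtspec, with a fix-it that moves 'const' forward.
    };
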
@@ -2410,7 +2425,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
if (Tok.is(tok::at)) {
- if (getLangOpts().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
+ if (getLangOpts().ObjC && NextToken().isObjCAtKeyword(tok::objc_defs))
Diag(Tok, diag::err_at_defs_cxx);
else
Diag(Tok, diag::err_at_in_class);
@@ -3449,6 +3464,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ // FIXME: Add support for signature help inside initializer lists.
ExprResult InitList = ParseBraceInitializer();
if (InitList.isInvalid())
return true;
@@ -3466,7 +3482,20 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// Parse the optional expression-list.
ExprVector ArgExprs;
CommaLocsTy CommaLocs;
- if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, CommaLocs)) {
+ if (Tok.isNot(tok::r_paren) &&
+ ParseExpressionList(ArgExprs, CommaLocs, [&] {
+ QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
+ getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
+ T.getOpenLocation());
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ })) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ Actions.ProduceCtorInitMemberSignatureHelp(
+ getCurScope(), ConstructorDecl, SS, TemplateTypeTy, ArgExprs, II,
+ T.getOpenLocation());
+ CalledSignatureHelp = true;
+ }
SkipUntil(tok::r_paren, StopAtSemi);
return true;
}
@@ -3776,6 +3805,28 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
return nullptr;
+ case tok::numeric_constant: {
+ // If we got a numeric constant, check to see if it comes from a macro that
+ // corresponds to the predefined __clang__ macro. If it does, warn the user
+ // and recover by pretending they said _Clang instead.
+ if (Tok.getLocation().isMacroID()) {
+ SmallString<8> ExpansionBuf;
+ SourceLocation ExpansionLoc =
+ PP.getSourceManager().getExpansionLoc(Tok.getLocation());
+ StringRef Spelling = PP.getSpelling(ExpansionLoc, ExpansionBuf);
+ if (Spelling == "__clang__") {
+ SourceRange TokRange(
+ ExpansionLoc,
+ PP.getSourceManager().getExpansionLoc(Tok.getEndLoc()));
+ Diag(Tok, diag::warn_wrong_clang_attr_namespace)
+ << FixItHint::CreateReplacement(TokRange, "_Clang");
+ Loc = ConsumeToken();
+ return &PP.getIdentifierTable().get("_Clang");
+ }
+ }
+ return nullptr;
+ }
+
case tok::ampamp: // 'and'
case tok::pipe: // 'bitor'
case tok::pipepipe: // 'or'
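
Because __clang__ is a predefined macro expanding to 1, using it as an attribute namespace yields a numeric-constant token; this hunk spots that case, warns, and recovers as if _Clang had been written. Illustrative declarations (the choice of attribute is arbitrary):

    // warning: the fix-it replaces the predefined macro '__clang__' with '_Clang'
    [[__clang__::warn_unused_result]] int computeA();

    // intended spellings:
    [[_Clang::warn_unused_result]] int computeB();
    [[clang::warn_unused_result]] int computeC();
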
@@ -3854,7 +3905,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
return false;
}
- if (ScopeName && ScopeName->getName() == "gnu") {
+ if (ScopeName && (ScopeName->isStr("gnu") || ScopeName->isStr("__gnu__"))) {
// GNU-scoped attributes have some special cases to handle GNU-specific
// behaviors.
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
@@ -3864,10 +3915,9 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
unsigned NumArgs;
// Some Clang-scoped attributes have some special parsing behavior.
- if (ScopeName && ScopeName->getName() == "clang")
- NumArgs =
- ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
+ NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Syntax);
else
NumArgs =
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
@@ -3875,7 +3925,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
if (!Attrs.empty() &&
IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
- ParsedAttr &Attr = *Attrs.begin();
+ ParsedAttr &Attr = Attrs.back();
// If the attribute is a standard or built-in attribute and we are
// parsing an argument list, we need to determine whether this attribute
// was allowed to have an argument list (such as [[deprecated]]), and how
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 2a65eec8a0f6..4bcbebcbb48e 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -315,6 +315,19 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
return LHS;
}
+ // In Objective-C++, alternative operator tokens can be used as keyword args
+ // in message expressions. Unconsume the token so that it can be reinterpreted
+ // as an identifier in ParseObjCMessageExpressionBody, i.e., we support:
+ // [foo meth:0 and:0];
+ // [foo not_eq];
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
+ Tok.isOneOf(tok::colon, tok::r_square) &&
+ OpToken.getIdentifierInfo() != nullptr) {
+ PP.EnterToken(Tok);
+ Tok = OpToken;
+ return LHS;
+ }
+
// Special case handling for the ternary operator.
ExprResult TernaryMiddle(true);
if (NextTokPrec == prec::Conditional) {
@@ -380,10 +393,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
}
}
- // Code completion for the right-hand side of an assignment expression
- // goes through a special hook that takes the left-hand side into account.
- if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) {
- Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get());
+ // Code completion for the right-hand side of a binary expression goes
+ // through a special hook that takes the left-hand side into account.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteBinaryRHS(getCurScope(), LHS.get(),
+ OpToken.getKind());
cutOffParsing();
return ExprError();
}
@@ -944,7 +958,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation ILoc = ConsumeToken();
// Support 'Class.property' and 'super.property' notation.
- if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
+ if (getLangOpts().ObjC && Tok.is(tok::period) &&
(Actions.getTypeName(II, ILoc, getCurScope()) ||
// Allow the base to be 'super' if in an objc-method.
(&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
@@ -974,7 +988,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// the token sequence is ill-formed. However, if there's a ':' or ']' after
// that identifier, this is probably a message send with a missing open
// bracket. Treat it as such.
- if (getLangOpts().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ if (getLangOpts().ObjC && &II == Ident_super && !InMessageExpression &&
getCurScope()->isInObjcMethodScope() &&
((Tok.is(tok::identifier) &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
@@ -989,7 +1003,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// send that's missing the opening '['. Recovery
// appropriately. Also take this path if we're performing code
// completion after an Objective-C class name.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
((Tok.is(tok::identifier) && !InMessageExpression) ||
Tok.is(tok::code_completion))) {
const Token& Next = NextToken();
@@ -1162,7 +1176,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
if (!getLangOpts().C11)
Diag(Tok, diag::ext_c11_alignment) << Tok.getName();
- // fallthrough
+ LLVM_FALLTHROUGH;
case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
// unary-expression: '__alignof' '(' type-name ')'
@@ -1228,7 +1242,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Ty.get(), nullptr);
break;
}
- // Fall through
+ LLVM_FALLTHROUGH;
case tok::annot_decltype:
case tok::kw_char:
@@ -1416,7 +1430,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
case tok::l_square:
if (getLangOpts().CPlusPlus11) {
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// C++11 lambda expressions and Objective-C message sends both start with a
// square bracket. There are three possibilities here:
// we have a valid lambda expression, we have an invalid lambda
@@ -1430,11 +1444,11 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseLambdaExpression();
break;
}
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
Res = ParseObjCMessageExpression();
break;
}
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
NotCastExpr = true;
return ExprError();
@@ -1498,7 +1512,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// If we see identifier: after an expression, and we're not already in a
// message send, then this is probably a message send with a missing
// opening bracket '['.
- if (getLangOpts().ObjC1 && !InMessageExpression &&
+ if (getLangOpts().ObjC && !InMessageExpression &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
nullptr, LHS.get());
@@ -1516,7 +1530,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// actually another message send. In this case, do some look-ahead to see
// if the contents of the square brackets are obviously not a valid
// expression and recover by pretending there is no suffix.
- if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
+ if (getLangOpts().ObjC && Tok.isAtStartOfLine() &&
isSimpleObjCMessageExpression())
return LHS;
@@ -1637,7 +1651,10 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CommaLocsTy CommaLocs;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteCall(getCurScope(), LHS.get(), None);
+ QualType PreferredType = Actions.ProduceCallSignatureHelp(
+ getCurScope(), LHS.get(), None, PT.getOpenLocation());
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
cutOffParsing();
return ExprError();
}
@@ -1645,9 +1662,21 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (OpKind == tok::l_paren || !LHS.isInvalid()) {
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(ArgExprs, CommaLocs, [&] {
- Actions.CodeCompleteCall(getCurScope(), LHS.get(), ArgExprs);
- })) {
+ QualType PreferredType = Actions.ProduceCallSignatureHelp(
+ getCurScope(), LHS.get(), ArgExprs, PT.getOpenLocation());
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ })) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
+ // If we got an error while parsing the expression list, the
+ // CodeCompleteCall handler was not invoked inside the parser. Call it
+ // here so we still get overload suggestions even in the middle of a
+ // parameter.
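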
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ Actions.ProduceCallSignatureHelp(getCurScope(), LHS.get(),
+ ArgExprs, PT.getOpenLocation());
+ CalledSignatureHelp = true;
+ }
LHS = ExprError();
} else if (LHS.isInvalid()) {
for (auto &E : ArgExprs)
@@ -1738,11 +1767,13 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Expr *Base = LHS.get();
Expr *CorrectedBase = CorrectedLHS.get();
+ if (!CorrectedBase && !getLangOpts().CPlusPlus)
+ CorrectedBase = Base;
// Code completion for a member access expression.
Actions.CodeCompleteMemberReferenceExpr(
getCurScope(), Base, CorrectedBase, OpLoc, OpKind == tok::arrow,
- Base && ExprStatementTokLoc == Base->getLocStart());
+ Base && ExprStatementTokLoc == Base->getBeginLoc());
cutOffParsing();
return ExprError();
@@ -1763,7 +1794,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// FIXME: Add support for explicit call of template constructor.
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
- if (getLangOpts().ObjC2 && OpKind == tok::period &&
+ if (getLangOpts().ObjC && OpKind == tok::period &&
Tok.is(tok::kw_class)) {
// Objective-C++:
// After a '.' in a member access expression, treat the keyword
@@ -1779,7 +1810,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/*EnteringContext=*/false,
/*AllowDestructorName=*/true,
/*AllowConstructorName=*/
- getLangOpts().MicrosoftExt,
+ getLangOpts().MicrosoftExt &&
+ SS.isNotEmpty(),
/*AllowDeductionGuide=*/false,
ObjectType, &TemplateKWLoc, Name)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
@@ -1992,8 +2024,10 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
CastRange);
UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
- if (OpTok.isOneOf(tok::kw_alignof, tok::kw___alignof, tok::kw__Alignof))
+ if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
ExprKind = UETT_AlignOf;
+ else if (OpTok.is(tok::kw___alignof))
+ ExprKind = UETT_PreferredAlignOf;
else if (OpTok.is(tok::kw_vec_step))
ExprKind = UETT_VecStep;
else if (OpTok.is(tok::kw___builtin_omp_required_simd_align))
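
Splitting __alignof into UETT_PreferredAlignOf separates GCC's preferred alignment from the ABI alignment produced by alignof/_Alignof; the two can differ (for example for 'double' on 32-bit x86). The values below are target-dependent and shown only for illustration:

    #include <cstdio>

    int main() {
      // alignof yields the ABI-required alignment; __alignof may yield the
      // larger preferred alignment on some targets (they match on many others).
      std::printf("alignof(double)=%zu __alignof(double)=%zu\n",
                  alignof(double), __alignof(double));
    }
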
@@ -2306,7 +2340,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
}
// Diagnose use of bridge casts in non-arc mode.
- bool BridgeCast = (getLangOpts().ObjC2 &&
+ bool BridgeCast = (getLangOpts().ObjC &&
Tok.isOneOf(tok::kw___bridge,
tok::kw___bridge_transfer,
tok::kw___bridge_retained,
@@ -2416,7 +2450,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// this is probably an Objective-C message send where the leading '[' is
// missing. Recover as if that were the case.
if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
- !InMessageExpression && getLangOpts().ObjC1 &&
+ !InMessageExpression && getLangOpts().ObjC &&
(NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
TypeResult Ty;
{
@@ -2502,7 +2536,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
}
// Reject the cast of super idiom in ObjC.
- if (Tok.is(tok::identifier) && getLangOpts().ObjC1 &&
+ if (Tok.is(tok::identifier) && getLangOpts().ObjC &&
Tok.getIdentifierInfo() == Ident_super &&
getCurScope()->isInObjcMethodScope() &&
GetLookAheadToken(1).isNot(tok::period)) {
@@ -2978,12 +3012,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*NumArgs=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 663c397ee049..3caec6b4def6 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -235,22 +235,11 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
while (true) {
if (HasScopeSpecifier) {
- // C++ [basic.lookup.classref]p5:
- // If the qualified-id has the form
- //
- // ::class-name-or-namespace-name::...
- //
- // the class-name-or-namespace-name is looked up in global scope as a
- // class-name or namespace-name.
- //
- // To implement this, we clear out the object type as soon as we've
- // seen a leading '::' or part of a nested-name-specifier.
- ObjectType = nullptr;
-
if (Tok.is(tok::code_completion)) {
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
- Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext);
+ Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext,
+ ObjectType.get());
// Include code completion token into the range of the scope otherwise
// when we try to annotate the scope tokens the dangling code completion
// token will cause assertion in
@@ -259,6 +248,18 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
cutOffParsing();
return true;
}
+
+ // C++ [basic.lookup.classref]p5:
+ // If the qualified-id has the form
+ //
+ // ::class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name is looked up in global scope as a
+ // class-name or namespace-name.
+ //
+ // To implement this, we clear out the object type as soon as we've
+ // seen a leading '::' or part of a nested-name-specifier.
+ ObjectType = nullptr;
}
// nested-name-specifier:
@@ -774,7 +775,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// send. In that case, fail here and let the ObjC message
// expression parser perform the completion.
if (Tok.is(tok::code_completion) &&
- !(getLangOpts().ObjC1 && Intro.Default == LCD_None &&
+ !(getLangOpts().ObjC && Intro.Default == LCD_None &&
!Intro.Captures.empty())) {
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
@@ -790,7 +791,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
if (Tok.is(tok::code_completion)) {
// If we're in Objective-C++ and we have a bare '[', then this is more
// likely to be a message receiver.
- if (getLangOpts().ObjC1 && first)
+ if (getLangOpts().ObjC && first)
Actions.CodeCompleteObjCMessageReceiver(getCurScope());
else
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
@@ -1205,12 +1206,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*hasProto=*/true,
/*isAmbiguous=*/false, LParenLoc, ParamInfo.data(),
ParamInfo.size(), EllipsisLoc, RParenLoc,
- DS.getTypeQualifiers(),
/*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
+ /*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
ESpecRange, DynamicExceptions.data(),
DynamicExceptionRanges.data(), DynamicExceptions.size(),
NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
@@ -1272,12 +1269,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
/*RefQualifierIsLValueRef=*/true,
- /*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc, MutableLoc, EST_None,
+ /*RefQualifierLoc=*/NoLoc, MutableLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
@@ -1674,8 +1667,8 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
return Init;
Expr *InitList = Init.get();
return Actions.ActOnCXXTypeConstructExpr(
- TypeRep, InitList->getLocStart(), MultiExprArg(&InitList, 1),
- InitList->getLocEnd(), /*ListInitialization=*/true);
+ TypeRep, InitList->getBeginLoc(), MultiExprArg(&InitList, 1),
+ InitList->getEndLoc(), /*ListInitialization=*/true);
} else {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1685,10 +1678,18 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(Exprs, CommaLocs, [&] {
- Actions.CodeCompleteConstructor(getCurScope(),
- TypeRep.get()->getCanonicalTypeInternal(),
- DS.getLocEnd(), Exprs);
- })) {
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DS.getEndLoc(), Exprs, T.getOpenLocation());
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ })) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DS.getEndLoc(), Exprs, T.getOpenLocation());
+ CalledSignatureHelp = true;
+ }
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
}
@@ -1730,10 +1731,14 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// \param Loc The location of the start of the statement that requires this
/// condition, e.g., the "for" in a for loop.
///
+/// \param FRI If non-null, a for range declaration is permitted, and if
+/// present will be parsed and stored here, and a null result will be returned.
+///
/// \returns The parsed condition.
Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
- Sema::ConditionKind CK) {
+ Sema::ConditionKind CK,
+ ForRangeInfo *FRI) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
if (Tok.is(tok::code_completion)) {
@@ -1753,7 +1758,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
};
// Determine what kind of thing we have.
- switch (isCXXConditionDeclarationOrInitStatement(InitStmt)) {
+ switch (isCXXConditionDeclarationOrInitStatement(InitStmt, FRI)) {
case ConditionOrInitStatement::Expression: {
ProhibitAttributes(attrs);
@@ -1761,7 +1766,13 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// if (; true);
if (InitStmt && Tok.is(tok::semi)) {
WarnOnInit();
- SourceLocation SemiLoc = ConsumeToken();
+ SourceLocation SemiLoc = Tok.getLocation();
+ if (!Tok.hasLeadingEmptyMacro() && !SemiLoc.isMacroID()) {
+ Diag(SemiLoc, diag::warn_empty_init_statement)
+ << (CK == Sema::ConditionKind::Switch)
+ << FixItHint::CreateRemoval(SemiLoc);
+ }
+ ConsumeToken();
*InitStmt = Actions.ActOnNullStmt(SemiLoc);
return ParseCXXCondition(nullptr, Loc, CK);
}
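
The new warn_empty_init_statement fires when the C++17 init-statement of an if or switch is just a stray ';' not produced by a macro, and offers a removal fix-it. For example:

    void check(int X) {
      if (; X > 0) {     // warning about the empty init-statement; fix-it removes ';'
      }
      switch (; X) {     // same diagnostic, switch flavor
      default:
        break;
      }
    }
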
@@ -1791,6 +1802,15 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
return ParseCXXCondition(nullptr, Loc, CK);
}
+ case ConditionOrInitStatement::ForRangeDecl: {
+ assert(FRI && "should not parse a for range declaration here");
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(
+ DeclaratorContext::ForContext, DeclEnd, attrs, false, FRI);
+ FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+ return Sema::ConditionResult();
+ }
+
case ConditionOrInitStatement::ConditionDecl:
case ConditionOrInitStatement::Error:
break;
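
Threading a ForRangeInfo through ParseCXXCondition lets the parser recognize the C++20 range-based 'for' with an init-statement, where the tokens after the first ';' form a for-range declaration rather than a condition. A self-contained sketch:

    #include <vector>

    int sumOf(const std::vector<int> &Input) {
      int Sum = 0;
      // C++20: init-statement, then a for-range declaration.
      for (auto Copy = Input; int Value : Copy)
        Sum += Value;
      return Sum;
    }
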
@@ -2817,13 +2837,22 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
if (Tok.isNot(tok::r_paren)) {
CommaLocsTy CommaLocs;
if (ParseExpressionList(ConstructorArgs, CommaLocs, [&] {
- ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(),
- DeclaratorInfo).get();
- Actions.CodeCompleteConstructor(getCurScope(),
- TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getLocEnd(),
- ConstructorArgs);
- })) {
+ ParsedType TypeRep =
+ Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ })) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ ParsedType TypeRep =
+ Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+ Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ CalledSignatureHelp = true;
+ }
SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
return ExprError();
}
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 0cd550bc6a47..7742a5087cf0 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -209,7 +209,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// send) or send to 'super', parse this as a message send
// expression. We handle C++ and C separately, since C++ requires
// much more complicated parsing.
- if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus) {
+ if (getLangOpts().ObjC && getLangOpts().CPlusPlus) {
// Send to 'super'.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
NextToken().isNot(tok::period) &&
@@ -242,7 +242,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// adopt the expression for further analysis below.
// FIXME: potentially-potentially evaluated expression above?
Idx = ExprResult(static_cast<Expr*>(TypeOrExpr));
- } else if (getLangOpts().ObjC1 && Tok.is(tok::identifier)) {
+ } else if (getLangOpts().ObjC && Tok.is(tok::identifier)) {
IdentifierInfo *II = Tok.getIdentifierInfo();
SourceLocation IILoc = Tok.getLocation();
ParsedType ReceiverType;
@@ -312,7 +312,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() {
// tokens are '...' or ']' or an objc message send. If this is an objc
// message send, handle it now. An objc-message send is the start of
// an assignment-expression production.
- if (getLangOpts().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ if (getLangOpts().ObjC && Tok.isNot(tok::ellipsis) &&
Tok.isNot(tok::r_square)) {
CheckArrayDesignatorSyntax(*this, Tok.getLocation(), Desig);
return ParseAssignmentExprWithObjCMessageExprStart(
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 99e5edb9d4a3..bd55f7179399 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -178,8 +178,7 @@ void Parser::CheckNestedObjCContexts(SourceLocation AtLoc)
Diag(AtLoc, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(AtLoc, "@end\n");
if (Decl)
- Diag(Decl->getLocStart(), diag::note_objc_container_start)
- << (int) ock;
+ Diag(Decl->getBeginLoc(), diag::note_objc_container_start) << (int)ock;
}
///
@@ -264,7 +263,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
categoryId = Tok.getIdentifierInfo();
categoryLoc = ConsumeToken();
}
- else if (!getLangOpts().ObjC2) {
+ else if (!getLangOpts().ObjC) {
Diag(Tok, diag::err_expected)
<< tok::identifier; // missing category name.
return nullptr;
@@ -384,12 +383,12 @@ static void addContextSensitiveTypeNullability(Parser &P,
if (D.getNumTypeObjects() > 0) {
// Add the attribute to the declarator chunk nearest the declarator.
- D.getTypeObject(0).getAttrs().addAtStart(
+ D.getTypeObject(0).getAttrs().addAtEnd(
getNullabilityAttr(D.getAttributePool()));
} else if (!addedToDeclSpec) {
// Otherwise, just put it on the declaration specifiers (if one
// isn't there already).
- D.getMutableDeclSpec().getAttributes().addAtStart(
+ D.getMutableDeclSpec().getAttributes().addAtEnd(
getNullabilityAttr(D.getMutableDeclSpec().getAttributes().getPool()));
addedToDeclSpec = true;
}
@@ -690,8 +689,8 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
case tok::objc_interface:
Diag(AtLoc, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(AtLoc, "@end\n");
- Diag(CDecl->getLocStart(), diag::note_objc_container_start)
- << (int) Actions.getObjCContainerKind();
+ Diag(CDecl->getBeginLoc(), diag::note_objc_container_start)
+ << (int)Actions.getObjCContainerKind();
ConsumeToken();
break;
@@ -706,7 +705,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
break;
case tok::objc_property:
- if (!getLangOpts().ObjC2)
+ if (!getLangOpts().ObjC)
Diag(AtLoc, diag::err_objc_properties_require_objc2);
ObjCDeclSpec OCDS;
@@ -776,8 +775,8 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
} else {
Diag(Tok, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(Tok.getLocation(), "\n@end\n");
- Diag(CDecl->getLocStart(), diag::note_objc_container_start)
- << (int) Actions.getObjCContainerKind();
+ Diag(CDecl->getBeginLoc(), diag::note_objc_container_start)
+ << (int)Actions.getObjCContainerKind();
AtEnd.setBegin(Tok.getLocation());
AtEnd.setEnd(Tok.getLocation());
}
@@ -1104,7 +1103,7 @@ bool Parser::isTokIdentifier_in() const {
// FIXME: May have to do additional look-ahead to only allow for
// valid tokens following an 'in'; such as an identifier, unary operators,
// '[' etc.
- return (getLangOpts().ObjC2 && Tok.is(tok::identifier) &&
+ return (getLangOpts().ObjC && Tok.is(tok::identifier) &&
Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
}
@@ -1198,7 +1197,7 @@ static void takeDeclAttributes(ParsedAttributesView &attrs,
for (auto &AL : llvm::reverse(from)) {
if (!AL.isUsedAsTypeAttr()) {
from.remove(&AL);
- attrs.addAtStart(&AL);
+ attrs.addAtEnd(&AL);
}
}
}
@@ -1338,7 +1337,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1365,7 +1364,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1401,7 +1400,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(paramAttrs);
MaybeParseCXX11Attributes(paramAttrs);
ArgInfo.ArgAttrs = paramAttrs;
@@ -1485,7 +1484,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC2)
+ if (getLangOpts().ObjC)
MaybeParseGNUAttributes(methodAttrs);
MaybeParseCXX11Attributes(methodAttrs);
@@ -1551,7 +1550,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
TypeResult Parser::parseObjCProtocolQualifierType(SourceLocation &rAngleLoc) {
assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
- assert(getLangOpts().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+ assert(getLangOpts().ObjC && "Protocol qualifiers only exist in Objective-C");
SourceLocation lAngleLoc;
SmallVector<Decl *, 8> protocols;
@@ -2223,7 +2222,7 @@ Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
if (P.isEofOrEom()) {
P.Diag(P.Tok, diag::err_objc_missing_end)
<< FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
- P.Diag(Dcl->getLocStart(), diag::note_objc_container_start)
+ P.Diag(Dcl->getBeginLoc(), diag::note_objc_container_start)
<< Sema::OCK_Implementation;
}
}
@@ -2742,7 +2741,7 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
// Otherwise, eat the semicolon.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return Actions.ActOnExprStmt(Res);
+ return Actions.ActOnExprStmt(Res, isExprValueDiscarded());
}
ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
@@ -2946,14 +2945,14 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
/// This routine will only return true for a subset of valid message-send
/// expressions.
bool Parser::isSimpleObjCMessageExpression() {
- assert(Tok.is(tok::l_square) && getLangOpts().ObjC1 &&
+ assert(Tok.is(tok::l_square) && getLangOpts().ObjC &&
"Incorrect start for isSimpleObjCMessageExpression");
return GetLookAheadToken(1).is(tok::identifier) &&
GetLookAheadToken(2).is(tok::identifier);
}
bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
- if (!getLangOpts().ObjC1 || !NextToken().is(tok::identifier) ||
+ if (!getLangOpts().ObjC || !NextToken().is(tok::identifier) ||
InMessageExpression)
return false;
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index a413e96a91e7..dd2a8aae9f2f 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -314,7 +314,7 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
Actions.ActOnOpenMPDeclareReductionCombinerStart(getCurScope(), D);
ExprResult CombinerResult =
Actions.ActOnFinishFullExpr(ParseAssignmentExpression().get(),
- D->getLocation(), /*DiscardedValue=*/true);
+ D->getLocation(), /*DiscardedValue*/ false);
Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerResult.get());
if (CombinerResult.isInvalid() && Tok.isNot(tok::r_paren) &&
@@ -353,12 +353,18 @@ Parser::ParseOpenMPDeclareReductionDirective(AccessSpecifier AS) {
// Check if initializer is omp_priv <init_expr> or something else.
if (Tok.is(tok::identifier) &&
Tok.getIdentifierInfo()->isStr("omp_priv")) {
- ConsumeToken();
- ParseOpenMPReductionInitializerForDecl(OmpPrivParm);
+ if (Actions.getLangOpts().CPlusPlus) {
+ InitializerResult = Actions.ActOnFinishFullExpr(
+ ParseAssignmentExpression().get(), D->getLocation(),
+ /*DiscardedValue*/ false);
+ } else {
+ ConsumeToken();
+ ParseOpenMPReductionInitializerForDecl(OmpPrivParm);
+ }
} else {
InitializerResult = Actions.ActOnFinishFullExpr(
ParseAssignmentExpression().get(), D->getLocation(),
- /*DiscardedValue=*/true);
+ /*DiscardedValue*/ false);
}
Actions.ActOnOpenMPDeclareReductionInitializerEnd(
D, InitializerResult.get(), OmpPrivParm);
@@ -415,11 +421,22 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
ExprVector Exprs;
CommaLocsTy CommaLocs;
- if (ParseExpressionList(Exprs, CommaLocs, [this, OmpPrivParm, &Exprs] {
- Actions.CodeCompleteConstructor(
- getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs);
- })) {
+ SourceLocation LParLoc = T.getOpenLocation();
+ if (ParseExpressionList(
+ Exprs, CommaLocs, [this, OmpPrivParm, LParLoc, &Exprs] {
+ QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
+ getCurScope(),
+ OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc);
+ CalledSignatureHelp = true;
+ Actions.CodeCompleteExpression(getCurScope(), PreferredType);
+ })) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp) {
+ Actions.ProduceConstructorSignatureHelp(
+ getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc);
+ CalledSignatureHelp = true;
+ }
Actions.ActOnInitializerError(OmpPrivParm);
SkipUntil(tok::r_paren, tok::annot_pragma_openmp_end, StopBeforeMatch);
} else {
@@ -478,7 +495,7 @@ public:
Sema &Actions = P.getActions();
// Allow 'this' within late-parsed attributes.
- ThisScope = new Sema::CXXThisScopeRAII(Actions, RD, /*TypeQuals=*/0,
+ ThisScope = new Sema::CXXThisScopeRAII(Actions, RD, Qualifiers(),
ND && ND->isCXXInstanceMember());
// If the Decl is templatized, add template parameters to scope.
@@ -627,6 +644,60 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
LinModifiers, Steps, SourceRange(Loc, EndLoc));
}
+Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
+ // OpenMP 4.5 syntax with list of entities.
+ Sema::NamedDeclSetType SameDirectiveDecls;
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OMPDeclareTargetDeclAttr::MapTypeTy MT = OMPDeclareTargetDeclAttr::MT_To;
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ StringRef ClauseName = II->getName();
+ // Parse 'to|link' clauses.
+ if (!OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT)) {
+ Diag(Tok, diag::err_omp_declare_target_unexpected_clause) << ClauseName;
+ break;
+ }
+ ConsumeToken();
+ }
+ auto &&Callback = [this, MT, &SameDirectiveDecls](
+ CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
+ Actions.ActOnOpenMPDeclareTargetName(getCurScope(), SS, NameInfo, MT,
+ SameDirectiveDecls);
+ };
+ if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
+ /*AllowScopeSpecifier=*/true))
+ break;
+
+ // Consume optional ','.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ ConsumeAnyToken();
+ SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
+ SameDirectiveDecls.end());
+ if (Decls.empty())
+ return DeclGroupPtrTy();
+ return Actions.BuildDeclaratorGroup(Decls);
+}
+
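For reference, a minimal sketch of the OpenMP 4.5 entity-list form that ParseOMPDeclareTargetClauses handles; the variable names are hypothetical, and the to/link spellings are the map types converted via ConvertStrToMapTypeTy above:

    int mapped_counter;      /* hypothetical: goes into the 'to' list */
    int linked_table[64];    /* hypothetical: goes into the 'link' list */
    #pragma omp declare target to(mapped_counter) link(linked_table)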
+void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+ SourceLocation DTLoc) {
+ if (DKind != OMPD_end_declare_target) {
+ Diag(Tok, diag::err_expected_end_declare_target);
+ Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
+ return;
+ }
+ ConsumeAnyToken();
+ if (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
+ << getOpenMPDirectiveName(OMPD_end_declare_target);
+ SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
+ }
+ // Skip the last annot_pragma_openmp_end.
+ ConsumeAnyToken();
+}
+
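And a sketch of the delimited form whose terminator ParseOMPEndDeclareTargetDirective now parses (the function name is made up):

    #pragma omp declare target
    static int on_device(int x) { return x + 1; }
    #pragma omp end declare target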
/// Parsing of declarative OpenMP directives.
///
/// threadprivate-directive:
@@ -642,6 +713,10 @@ Parser::ParseOMPDeclareSimdClauses(Parser::DeclGroupPtrTy Ptr,
/// annot_pragma_openmp_end
/// <function declaration/definition>
///
+/// requires directive:
+/// annot_pragma_openmp 'requires' <clause> [[[,] <clause>] ... ]
+/// annot_pragma_openmp_end
+///
Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType, Decl *Tag) {
@@ -670,6 +745,46 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
break;
}
+ case OMPD_requires: {
+ SourceLocation StartLoc = ConsumeToken();
+ SmallVector<OMPClause *, 5> Clauses;
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>, OMPC_unknown + 1>
+ FirstClauses(OMPC_unknown + 1);
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::err_omp_expected_clause)
+ << getOpenMPDirectiveName(OMPD_requires);
+ break;
+ }
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ OMPClause *Clause =
+ ParseOpenMPClause(OMPD_requires, CKind, !FirstClauses[CKind].getInt());
+ SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end, StopBeforeMatch);
+ FirstClauses[CKind].setInt(true);
+ if (Clause != nullptr)
+ Clauses.push_back(Clause);
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Actions.EndOpenMPClause();
+ break;
+ }
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+ // Consume final annot_pragma_openmp_end
+ if (Clauses.size() == 0) {
+ Diag(Tok, diag::err_omp_expected_clause)
+ << getOpenMPDirectiveName(OMPD_requires);
+ ConsumeAnnotationToken();
+ return nullptr;
+ }
+ ConsumeAnnotationToken();
+ return Actions.ActOnOpenMPRequiresDirective(StartLoc, Clauses);
+ }
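As a usage sketch, the OMPD_requires case above accepts OpenMP 5.0 directives of this shape (hypothetical translation-unit-scope lines; which clauses are honored downstream depends on the accompanying Sema/codegen support):

    #pragma omp requires unified_shared_memory, reverse_offload
    #pragma omp requires atomic_default_mem_order(seq_cst)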
case OMPD_declare_reduction:
ConsumeToken();
if (DeclGroupPtrTy Res = ParseOpenMPDeclareReductionDirective(AS)) {
@@ -724,43 +839,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- // OpenMP 4.5 syntax with list of entities.
- Sema::NamedDeclSetType SameDirectiveDecls;
- while (Tok.isNot(tok::annot_pragma_openmp_end)) {
- OMPDeclareTargetDeclAttr::MapTypeTy MT =
- OMPDeclareTargetDeclAttr::MT_To;
- if (Tok.is(tok::identifier)) {
- IdentifierInfo *II = Tok.getIdentifierInfo();
- StringRef ClauseName = II->getName();
- // Parse 'to|link' clauses.
- if (!OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName,
- MT)) {
- Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName;
- break;
- }
- ConsumeToken();
- }
- auto &&Callback = [this, MT, &SameDirectiveDecls](
- CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
- Actions.ActOnOpenMPDeclareTargetName(getCurScope(), SS, NameInfo, MT,
- SameDirectiveDecls);
- };
- if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
- /*AllowScopeSpecifier=*/true))
- break;
-
- // Consume optional ','.
- if (Tok.is(tok::comma))
- ConsumeToken();
- }
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- ConsumeAnyToken();
- SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
- SameDirectiveDecls.end());
- if (Decls.empty())
- return DeclGroupPtrTy();
- return Actions.BuildDeclaratorGroup(Decls);
+ return ParseOMPDeclareTargetClauses();
}
// Skip the last annot_pragma_openmp_end.
@@ -771,8 +850,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
llvm::SmallVector<Decl *, 4> Decls;
DKind = parseOpenMPDirectiveKind(*this);
- while (DKind != OMPD_end_declare_target && DKind != OMPD_declare_target &&
- Tok.isNot(tok::eof) && Tok.isNot(tok::r_brace)) {
+ while (DKind != OMPD_end_declare_target && Tok.isNot(tok::eof) &&
+ Tok.isNot(tok::r_brace)) {
DeclGroupPtrTy Ptr;
// Here we expect to see some function declaration.
if (AS == AS_none) {
@@ -799,19 +878,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
}
}
- if (DKind == OMPD_end_declare_target) {
- ConsumeAnyToken();
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- Diag(Tok, diag::warn_omp_extra_tokens_at_eol)
- << getOpenMPDirectiveName(OMPD_end_declare_target);
- SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- }
- // Skip the last annot_pragma_openmp_end.
- ConsumeAnyToken();
- } else {
- Diag(Tok, diag::err_expected_end_declare_target);
- Diag(DTLoc, diag::note_matching) << "'#pragma omp declare target'";
- }
+ ParseOMPEndDeclareTargetDirective(DKind, DTLoc);
Actions.ActOnFinishOpenMPDeclareTargetDirective();
return Actions.BuildDeclaratorGroup(Decls);
}
@@ -1124,6 +1191,7 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_requires:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
SkipUntil(tok::annot_pragma_openmp_end);
@@ -1275,11 +1343,15 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
break;
case OMPC_default:
case OMPC_proc_bind:
+ case OMPC_atomic_default_mem_order:
// OpenMP [2.14.3.1, Restrictions]
// Only a single default clause may be specified on a parallel, task or
// teams directive.
// OpenMP [2.5, parallel Construct, Restrictions]
// At most one proc_bind clause can appear on the directive.
+ // OpenMP [5.0, Requires directive, Restrictions]
+ // At most one atomic_default_mem_order clause can appear
+ // on the directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -1316,10 +1388,16 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_threads:
case OMPC_simd:
case OMPC_nogroup:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
// Only one nowait clause can appear on a for directive.
+ // OpenMP [5.0, Requires directive, Restrictions]
+ // Each of the requires clauses can appear at most once on the directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -1377,7 +1455,7 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
ExprResult LHS(ParseCastExpression(
/*isUnaryExpression=*/false, /*isAddressOfOperand=*/false, NotTypeCast));
ExprResult Val(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
- Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
+ Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc, /*DiscardedValue*/ false);
// Parse ')'.
RLoc = Tok.getLocation();
@@ -1633,7 +1711,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(false, false, NotTypeCast));
Val = ParseRHSOfBinaryExpression(LHS, prec::Conditional);
- Val = Actions.ActOnFinishFullExpr(Val.get(), ELoc);
+ Val =
+ Actions.ActOnFinishFullExpr(Val.get(), ELoc, /*DiscardedValue*/ false);
}
// Parse ')'.
@@ -1696,6 +1775,79 @@ static bool ParseReductionId(Parser &P, CXXScopeSpec &ReductionIdScopeSpec,
nullptr, nullptr, ReductionId);
}
+/// Checks if the token is a valid map-type-modifier.
+static OpenMPMapModifierKind isMapModifier(Parser &P) {
+ Token Tok = P.getCurToken();
+ if (!Tok.is(tok::identifier))
+ return OMPC_MAP_MODIFIER_unknown;
+
+ Preprocessor &PP = P.getPreprocessor();
+ OpenMPMapModifierKind TypeModifier = static_cast<OpenMPMapModifierKind>(
+ getOpenMPSimpleClauseType(OMPC_map, PP.getSpelling(Tok)));
+ return TypeModifier;
+}
+
+/// Parse map-type-modifiers in map clause.
+/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
+/// where, map-type-modifier ::= always | close
+static void parseMapTypeModifiers(Parser &P,
+ Parser::OpenMPVarListDataTy &Data) {
+ Preprocessor &PP = P.getPreprocessor();
+ while (P.getCurToken().isNot(tok::colon)) {
+ Token Tok = P.getCurToken();
+ OpenMPMapModifierKind TypeModifier = isMapModifier(P);
+ if (TypeModifier == OMPC_MAP_MODIFIER_always ||
+ TypeModifier == OMPC_MAP_MODIFIER_close) {
+ Data.MapTypeModifiers.push_back(TypeModifier);
+ Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
+ P.ConsumeToken();
+ } else {
+ // Handle the case of an unknown map-type-modifier or a map-type.
+ // A map-type is followed by a colon; the function returns when it
+ // encounters a token followed by a colon.
+ if (Tok.is(tok::comma)) {
+ P.Diag(Tok, diag::err_omp_map_type_modifier_missing);
+ P.ConsumeToken();
+ continue;
+ }
+ // Potential map-type token as it is followed by a colon.
+ if (PP.LookAhead(0).is(tok::colon))
+ return;
+ P.Diag(Tok, diag::err_omp_unknown_map_type_modifier);
+ P.ConsumeToken();
+ }
+ if (P.getCurToken().is(tok::comma))
+ P.ConsumeToken();
+ }
+}
+
+/// Checks if the token is a valid map-type.
+static OpenMPMapClauseKind isMapType(Parser &P) {
+ Token Tok = P.getCurToken();
+ // The map-type token can be either an identifier or the C++ delete keyword.
+ if (!Tok.isOneOf(tok::identifier, tok::kw_delete))
+ return OMPC_MAP_unknown;
+ Preprocessor &PP = P.getPreprocessor();
+ OpenMPMapClauseKind MapType = static_cast<OpenMPMapClauseKind>(
+ getOpenMPSimpleClauseType(OMPC_map, PP.getSpelling(Tok)));
+ return MapType;
+}
+
+/// Parse map-type in map clause.
+/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
+/// where, map-type ::= to | from | tofrom | alloc | release | delete
+static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
+ Token Tok = P.getCurToken();
+ if (Tok.is(tok::colon)) {
+ P.Diag(Tok, diag::err_omp_map_type_missing);
+ return;
+ }
+ Data.MapType = isMapType(P);
+ if (Data.MapType == OMPC_MAP_unknown)
+ P.Diag(Tok, diag::err_omp_unknown_map_type);
+ P.ConsumeToken();
+}
+
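Taken together, parseMapTypeModifiers and parseMapType accept map clauses such as the following sketch ('close' is an OpenMP 5.0 modifier; the function and array names are made up):

    void offload(int n, float *a, const float *b) {
    #pragma omp target map(always, close, tofrom: a[0:n]) map(to: b[0:n])
      for (int i = 0; i < n; ++i)
        a[i] += b[i];
    }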
/// Parses clauses with list.
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
@@ -1703,7 +1855,6 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
- bool MapTypeModifierSpecified = false;
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
@@ -1775,104 +1926,40 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Handle map type for map clause.
ColonProtectionRAIIObject ColonRAII(*this);
- /// The map clause modifier token can be either a identifier or the C++
- /// delete keyword.
- auto &&IsMapClauseModifierToken = [](const Token &Tok) -> bool {
- return Tok.isOneOf(tok::identifier, tok::kw_delete);
- };
-
// The first identifier may be a list item, a map-type or a
- // map-type-modifier. The map modifier can also be delete which has the same
+ // map-type-modifier. The map-type can also be delete which has the same
// spelling of the C++ delete keyword.
- Data.MapType =
- IsMapClauseModifierToken(Tok)
- ? static_cast<OpenMPMapClauseKind>(
- getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)))
- : OMPC_MAP_unknown;
Data.DepLinMapLoc = Tok.getLocation();
- bool ColonExpected = false;
-
- if (IsMapClauseModifierToken(Tok)) {
- if (PP.LookAhead(0).is(tok::colon)) {
- if (Data.MapType == OMPC_MAP_unknown)
- Diag(Tok, diag::err_omp_unknown_map_type);
- else if (Data.MapType == OMPC_MAP_always)
- Diag(Tok, diag::err_omp_map_type_missing);
- ConsumeToken();
- } else if (PP.LookAhead(0).is(tok::comma)) {
- if (IsMapClauseModifierToken(PP.LookAhead(1)) &&
- PP.LookAhead(2).is(tok::colon)) {
- Data.MapTypeModifier = Data.MapType;
- if (Data.MapTypeModifier != OMPC_MAP_always) {
- Diag(Tok, diag::err_omp_unknown_map_type_modifier);
- Data.MapTypeModifier = OMPC_MAP_unknown;
- } else {
- MapTypeModifierSpecified = true;
- }
-
- ConsumeToken();
- ConsumeToken();
-
- Data.MapType =
- IsMapClauseModifierToken(Tok)
- ? static_cast<OpenMPMapClauseKind>(
- getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)))
- : OMPC_MAP_unknown;
- if (Data.MapType == OMPC_MAP_unknown ||
- Data.MapType == OMPC_MAP_always)
- Diag(Tok, diag::err_omp_unknown_map_type);
- ConsumeToken();
- } else {
- Data.MapType = OMPC_MAP_tofrom;
- Data.IsMapTypeImplicit = true;
- }
- } else if (IsMapClauseModifierToken(PP.LookAhead(0))) {
- if (PP.LookAhead(1).is(tok::colon)) {
- Data.MapTypeModifier = Data.MapType;
- if (Data.MapTypeModifier != OMPC_MAP_always) {
- Diag(Tok, diag::err_omp_unknown_map_type_modifier);
- Data.MapTypeModifier = OMPC_MAP_unknown;
- } else {
- MapTypeModifierSpecified = true;
- }
- ConsumeToken();
-
- Data.MapType =
- IsMapClauseModifierToken(Tok)
- ? static_cast<OpenMPMapClauseKind>(
- getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok)))
- : OMPC_MAP_unknown;
- if (Data.MapType == OMPC_MAP_unknown ||
- Data.MapType == OMPC_MAP_always)
- Diag(Tok, diag::err_omp_unknown_map_type);
- ConsumeToken();
- } else {
- Data.MapType = OMPC_MAP_tofrom;
- Data.IsMapTypeImplicit = true;
- }
- } else {
- Data.MapType = OMPC_MAP_tofrom;
- Data.IsMapTypeImplicit = true;
- }
- } else {
+ // Check for presence of a colon in the map clause.
+ TentativeParsingAction TPA(*this);
+ bool ColonPresent = false;
+ if (SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch)) {
+ if (Tok.is(tok::colon))
+ ColonPresent = true;
+ }
+ TPA.Revert();
+ // Only parse map-type-modifier[s] and map-type if a colon is present in
+ // the map clause.
+ if (ColonPresent) {
+ parseMapTypeModifiers(*this, Data);
+ parseMapType(*this, Data);
+ }
+ if (Data.MapType == OMPC_MAP_unknown) {
Data.MapType = OMPC_MAP_tofrom;
Data.IsMapTypeImplicit = true;
}
if (Tok.is(tok::colon))
Data.ColonLoc = ConsumeToken();
- else if (ColonExpected)
- Diag(Tok, diag::warn_pragma_expected_colon) << "map type";
}
bool IsComma =
(Kind != OMPC_reduction && Kind != OMPC_task_reduction &&
Kind != OMPC_in_reduction && Kind != OMPC_depend && Kind != OMPC_map) ||
(Kind == OMPC_reduction && !InvalidReductionId) ||
- (Kind == OMPC_map && Data.MapType != OMPC_MAP_unknown &&
- (!MapTypeModifierSpecified ||
- Data.MapTypeModifier == OMPC_MAP_always)) ||
+ (Kind == OMPC_map && Data.MapType != OMPC_MAP_unknown) ||
(Kind == OMPC_depend && Data.DepKind != OMPC_DEPEND_unknown);
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
@@ -1910,7 +1997,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ColonLoc = Tok.getLocation();
SourceLocation ELoc = ConsumeToken();
ExprResult Tail = ParseAssignmentExpression();
- Tail = Actions.ActOnFinishFullExpr(Tail.get(), ELoc);
+ Tail =
+ Actions.ActOnFinishFullExpr(Tail.get(), ELoc, /*DiscardedValue*/ false);
if (Tail.isUsable())
Data.TailExpr = Tail.get();
else
@@ -1957,7 +2045,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// depend-clause:
/// 'depend' '(' in | out | inout : list | source ')'
/// map-clause:
-/// 'map' '(' [ [ always , ]
+/// 'map' '(' [ [ always [,] ] [ close [,] ]
/// to | from | tofrom | alloc | release | delete ':' ] list ')';
/// to-clause:
/// 'to' '(' list ')'
@@ -1988,7 +2076,7 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
return Actions.ActOnOpenMPVarListClause(
Kind, Vars, Data.TailExpr, Loc, LOpen, Data.ColonLoc, Data.RLoc,
Data.ReductionIdScopeSpec, Data.ReductionId, Data.DepKind, Data.LinKind,
- Data.MapTypeModifier, Data.MapType, Data.IsMapTypeImplicit,
- Data.DepLinMapLoc);
+ Data.MapTypeModifiers, Data.MapTypeModifiersLoc, Data.MapType,
+ Data.IsMapTypeImplicit, Data.DepLinMapLoc);
}
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index 07f48e0779dc..380eb64997a7 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -15,10 +15,10 @@
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/LoopHint.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -106,8 +106,19 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
tok::OnOffSwitch OOS;
if (PP.LexOnOffSwitch(OOS))
return;
- if (OOS == tok::OOS_ON)
+ if (OOS == tok::OOS_ON) {
PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ }
+
+ MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
+ 1);
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_fenv_access);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setAnnotationEndLoc(Tok.getLocation());
+ Toks[0].setAnnotationValue(reinterpret_cast<void*>(
+ static_cast<uintptr_t>(OOS)));
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
}
};
@@ -250,112 +261,122 @@ struct PragmaAttributeHandler : public PragmaHandler {
} // end namespace
void Parser::initializePragmaHandlers() {
- AlignHandler.reset(new PragmaAlignHandler());
+ AlignHandler = llvm::make_unique<PragmaAlignHandler>();
PP.AddPragmaHandler(AlignHandler.get());
- GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler());
+ GCCVisibilityHandler = llvm::make_unique<PragmaGCCVisibilityHandler>();
PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
- OptionsHandler.reset(new PragmaOptionsHandler());
+ OptionsHandler = llvm::make_unique<PragmaOptionsHandler>();
PP.AddPragmaHandler(OptionsHandler.get());
- PackHandler.reset(new PragmaPackHandler());
+ PackHandler = llvm::make_unique<PragmaPackHandler>();
PP.AddPragmaHandler(PackHandler.get());
- MSStructHandler.reset(new PragmaMSStructHandler());
+ MSStructHandler = llvm::make_unique<PragmaMSStructHandler>();
PP.AddPragmaHandler(MSStructHandler.get());
- UnusedHandler.reset(new PragmaUnusedHandler());
+ UnusedHandler = llvm::make_unique<PragmaUnusedHandler>();
PP.AddPragmaHandler(UnusedHandler.get());
- WeakHandler.reset(new PragmaWeakHandler());
+ WeakHandler = llvm::make_unique<PragmaWeakHandler>();
PP.AddPragmaHandler(WeakHandler.get());
- RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler());
+ RedefineExtnameHandler = llvm::make_unique<PragmaRedefineExtnameHandler>();
PP.AddPragmaHandler(RedefineExtnameHandler.get());
- FPContractHandler.reset(new PragmaFPContractHandler());
+ FPContractHandler = llvm::make_unique<PragmaFPContractHandler>();
PP.AddPragmaHandler("STDC", FPContractHandler.get());
- STDCFENVHandler.reset(new PragmaSTDC_FENV_ACCESSHandler());
+ STDCFENVHandler = llvm::make_unique<PragmaSTDC_FENV_ACCESSHandler>();
PP.AddPragmaHandler("STDC", STDCFENVHandler.get());
- STDCCXLIMITHandler.reset(new PragmaSTDC_CX_LIMITED_RANGEHandler());
+ STDCCXLIMITHandler = llvm::make_unique<PragmaSTDC_CX_LIMITED_RANGEHandler>();
PP.AddPragmaHandler("STDC", STDCCXLIMITHandler.get());
- STDCUnknownHandler.reset(new PragmaSTDC_UnknownHandler());
+ STDCUnknownHandler = llvm::make_unique<PragmaSTDC_UnknownHandler>();
PP.AddPragmaHandler("STDC", STDCUnknownHandler.get());
- PCSectionHandler.reset(new PragmaClangSectionHandler(Actions));
+ PCSectionHandler = llvm::make_unique<PragmaClangSectionHandler>(Actions);
PP.AddPragmaHandler("clang", PCSectionHandler.get());
if (getLangOpts().OpenCL) {
- OpenCLExtensionHandler.reset(new PragmaOpenCLExtensionHandler());
+ OpenCLExtensionHandler = llvm::make_unique<PragmaOpenCLExtensionHandler>();
PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
}
if (getLangOpts().OpenMP)
- OpenMPHandler.reset(new PragmaOpenMPHandler());
+ OpenMPHandler = llvm::make_unique<PragmaOpenMPHandler>();
else
- OpenMPHandler.reset(new PragmaNoOpenMPHandler());
+ OpenMPHandler = llvm::make_unique<PragmaNoOpenMPHandler>();
PP.AddPragmaHandler(OpenMPHandler.get());
if (getLangOpts().MicrosoftExt ||
getTargetInfo().getTriple().isOSBinFormatELF()) {
- MSCommentHandler.reset(new PragmaCommentHandler(Actions));
+ MSCommentHandler = llvm::make_unique<PragmaCommentHandler>(Actions);
PP.AddPragmaHandler(MSCommentHandler.get());
}
if (getLangOpts().MicrosoftExt) {
- MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(Actions));
+ MSDetectMismatchHandler =
+ llvm::make_unique<PragmaDetectMismatchHandler>(Actions);
PP.AddPragmaHandler(MSDetectMismatchHandler.get());
- MSPointersToMembers.reset(new PragmaMSPointersToMembers());
+ MSPointersToMembers = llvm::make_unique<PragmaMSPointersToMembers>();
PP.AddPragmaHandler(MSPointersToMembers.get());
- MSVtorDisp.reset(new PragmaMSVtorDisp());
+ MSVtorDisp = llvm::make_unique<PragmaMSVtorDisp>();
PP.AddPragmaHandler(MSVtorDisp.get());
- MSInitSeg.reset(new PragmaMSPragma("init_seg"));
+ MSInitSeg = llvm::make_unique<PragmaMSPragma>("init_seg");
PP.AddPragmaHandler(MSInitSeg.get());
- MSDataSeg.reset(new PragmaMSPragma("data_seg"));
+ MSDataSeg = llvm::make_unique<PragmaMSPragma>("data_seg");
PP.AddPragmaHandler(MSDataSeg.get());
- MSBSSSeg.reset(new PragmaMSPragma("bss_seg"));
+ MSBSSSeg = llvm::make_unique<PragmaMSPragma>("bss_seg");
PP.AddPragmaHandler(MSBSSSeg.get());
- MSConstSeg.reset(new PragmaMSPragma("const_seg"));
+ MSConstSeg = llvm::make_unique<PragmaMSPragma>("const_seg");
PP.AddPragmaHandler(MSConstSeg.get());
- MSCodeSeg.reset(new PragmaMSPragma("code_seg"));
+ MSCodeSeg = llvm::make_unique<PragmaMSPragma>("code_seg");
PP.AddPragmaHandler(MSCodeSeg.get());
- MSSection.reset(new PragmaMSPragma("section"));
+ MSSection = llvm::make_unique<PragmaMSPragma>("section");
PP.AddPragmaHandler(MSSection.get());
- MSRuntimeChecks.reset(new PragmaMSRuntimeChecksHandler());
+ MSRuntimeChecks = llvm::make_unique<PragmaMSRuntimeChecksHandler>();
PP.AddPragmaHandler(MSRuntimeChecks.get());
- MSIntrinsic.reset(new PragmaMSIntrinsicHandler());
+ MSIntrinsic = llvm::make_unique<PragmaMSIntrinsicHandler>();
PP.AddPragmaHandler(MSIntrinsic.get());
- MSOptimize.reset(new PragmaMSOptimizeHandler());
+ MSOptimize = llvm::make_unique<PragmaMSOptimizeHandler>();
PP.AddPragmaHandler(MSOptimize.get());
}
if (getLangOpts().CUDA) {
- CUDAForceHostDeviceHandler.reset(
- new PragmaForceCUDAHostDeviceHandler(Actions));
+ CUDAForceHostDeviceHandler =
+ llvm::make_unique<PragmaForceCUDAHostDeviceHandler>(Actions);
PP.AddPragmaHandler("clang", CUDAForceHostDeviceHandler.get());
}
- OptimizeHandler.reset(new PragmaOptimizeHandler(Actions));
+ OptimizeHandler = llvm::make_unique<PragmaOptimizeHandler>(Actions);
PP.AddPragmaHandler("clang", OptimizeHandler.get());
- LoopHintHandler.reset(new PragmaLoopHintHandler());
+ LoopHintHandler = llvm::make_unique<PragmaLoopHintHandler>();
PP.AddPragmaHandler("clang", LoopHintHandler.get());
- UnrollHintHandler.reset(new PragmaUnrollHintHandler("unroll"));
+ UnrollHintHandler = llvm::make_unique<PragmaUnrollHintHandler>("unroll");
PP.AddPragmaHandler(UnrollHintHandler.get());
- NoUnrollHintHandler.reset(new PragmaUnrollHintHandler("nounroll"));
+ NoUnrollHintHandler = llvm::make_unique<PragmaUnrollHintHandler>("nounroll");
PP.AddPragmaHandler(NoUnrollHintHandler.get());
- FPHandler.reset(new PragmaFPHandler());
+ UnrollAndJamHintHandler =
+ llvm::make_unique<PragmaUnrollHintHandler>("unroll_and_jam");
+ PP.AddPragmaHandler(UnrollAndJamHintHandler.get());
+
+ NoUnrollAndJamHintHandler =
+ llvm::make_unique<PragmaUnrollHintHandler>("nounroll_and_jam");
+ PP.AddPragmaHandler(NoUnrollAndJamHintHandler.get());
+
+ FPHandler = llvm::make_unique<PragmaFPHandler>();
PP.AddPragmaHandler("clang", FPHandler.get());
- AttributePragmaHandler.reset(new PragmaAttributeHandler(AttrFactory));
+ AttributePragmaHandler =
+ llvm::make_unique<PragmaAttributeHandler>(AttrFactory);
PP.AddPragmaHandler("clang", AttributePragmaHandler.get());
}
@@ -451,6 +472,12 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler(NoUnrollHintHandler.get());
NoUnrollHintHandler.reset();
+ PP.RemovePragmaHandler(UnrollAndJamHintHandler.get());
+ UnrollAndJamHintHandler.reset();
+
+ PP.RemovePragmaHandler(NoUnrollAndJamHintHandler.get());
+ NoUnrollAndJamHintHandler.reset();
+
PP.RemovePragmaHandler("clang", FPHandler.get());
FPHandler.reset();
@@ -591,6 +618,30 @@ void Parser::HandlePragmaFPContract() {
ConsumeAnnotationToken();
}
+void Parser::HandlePragmaFEnvAccess() {
+ assert(Tok.is(tok::annot_pragma_fenv_access));
+ tok::OnOffSwitch OOS =
+ static_cast<tok::OnOffSwitch>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+
+ LangOptions::FEnvAccessModeKind FPC;
+ switch (OOS) {
+ case tok::OOS_ON:
+ FPC = LangOptions::FEA_On;
+ break;
+ case tok::OOS_OFF:
+ FPC = LangOptions::FEA_Off;
+ break;
+ case tok::OOS_DEFAULT: // FIXME: Add this cli option when it makes sense.
+ FPC = LangOptions::FEA_Off;
+ break;
+ }
+
+ Actions.ActOnPragmaFEnvAccess(FPC);
+ ConsumeAnnotationToken();
+}
+
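For context, HandlePragmaFEnvAccess is driven by source like the following sketch; note that the handler above still warns that ON is unsupported while forwarding the state to Sema:

    #include <fenv.h>
    #pragma STDC FENV_ACCESS ON

    int inexact_sum(double x, double y) {
      /* With FENV_ACCESS ON the compiler must assume the status flags are
         observable, so the addition below cannot be constant-folded away. */
      feclearexcept(FE_INEXACT);
      volatile double sum = x + y;
      (void)sum;
      return fetestexcept(FE_INEXACT) != 0;
    }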
StmtResult Parser::HandlePragmaCaptured()
{
assert(Tok.is(tok::annot_pragma_captured));
@@ -955,6 +1006,8 @@ static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
if (PragmaName.getIdentifierInfo()->getName() == "loop") {
PragmaString = "clang loop ";
PragmaString += Option.getIdentifierInfo()->getName();
+ } else if (PragmaName.getIdentifierInfo()->getName() == "unroll_and_jam") {
+ PragmaString = "unroll_and_jam";
} else {
assert(PragmaName.getIdentifierInfo()->getName() == "unroll" &&
"Unexpected pragma name");
@@ -986,7 +1039,10 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
// without an argument.
bool PragmaUnroll = PragmaNameInfo->getName() == "unroll";
bool PragmaNoUnroll = PragmaNameInfo->getName() == "nounroll";
- if (Toks.empty() && (PragmaUnroll || PragmaNoUnroll)) {
+ bool PragmaUnrollAndJam = PragmaNameInfo->getName() == "unroll_and_jam";
+ bool PragmaNoUnrollAndJam = PragmaNameInfo->getName() == "nounroll_and_jam";
+ if (Toks.empty() && (PragmaUnroll || PragmaNoUnroll || PragmaUnrollAndJam ||
+ PragmaNoUnrollAndJam)) {
ConsumeAnnotationToken();
Hint.Range = Info->PragmaName.getLocation();
return true;
@@ -999,24 +1055,31 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
// If no option is specified the argument is assumed to be a constant expr.
bool OptionUnroll = false;
+ bool OptionUnrollAndJam = false;
bool OptionDistribute = false;
+ bool OptionPipelineDisabled = false;
bool StateOption = false;
if (OptionInfo) { // Pragma Unroll does not specify an option.
OptionUnroll = OptionInfo->isStr("unroll");
+ OptionUnrollAndJam = OptionInfo->isStr("unroll_and_jam");
OptionDistribute = OptionInfo->isStr("distribute");
+ OptionPipelineDisabled = OptionInfo->isStr("pipeline");
StateOption = llvm::StringSwitch<bool>(OptionInfo->getName())
.Case("vectorize", true)
.Case("interleave", true)
.Default(false) ||
- OptionUnroll || OptionDistribute;
+ OptionUnroll || OptionUnrollAndJam || OptionDistribute ||
+ OptionPipelineDisabled;
}
- bool AssumeSafetyArg = !OptionUnroll && !OptionDistribute;
+ bool AssumeSafetyArg = !OptionUnroll && !OptionUnrollAndJam &&
+ !OptionDistribute && !OptionPipelineDisabled;
// Verify loop hint has an argument.
if (Toks[0].is(tok::eof)) {
ConsumeAnnotationToken();
Diag(Toks[0].getLocation(), diag::err_pragma_loop_missing_argument)
- << /*StateArgument=*/StateOption << /*FullKeyword=*/OptionUnroll
+ << /*StateArgument=*/StateOption
+ << /*FullKeyword=*/(OptionUnroll || OptionUnrollAndJam)
<< /*AssumeSafetyKeyword=*/AssumeSafetyArg;
return false;
}
@@ -1029,14 +1092,19 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
bool Valid = StateInfo &&
llvm::StringSwitch<bool>(StateInfo->getName())
- .Cases("enable", "disable", true)
- .Case("full", OptionUnroll)
+ .Case("disable", true)
+ .Case("enable", !OptionPipelineDisabled)
+ .Case("full", OptionUnroll || OptionUnrollAndJam)
.Case("assume_safety", AssumeSafetyArg)
.Default(false);
if (!Valid) {
- Diag(Toks[0].getLocation(), diag::err_pragma_invalid_keyword)
- << /*FullKeyword=*/OptionUnroll
- << /*AssumeSafetyKeyword=*/AssumeSafetyArg;
+ if (OptionPipelineDisabled) {
+ Diag(Toks[0].getLocation(), diag::err_pragma_pipeline_invalid_keyword);
+ } else {
+ Diag(Toks[0].getLocation(), diag::err_pragma_invalid_keyword)
+ << /*FullKeyword=*/(OptionUnroll || OptionUnrollAndJam)
+ << /*AssumeSafetyKeyword=*/AssumeSafetyArg;
+ }
return false;
}
if (Toks.size() > 2)
@@ -1076,9 +1144,10 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
namespace {
struct PragmaAttributeInfo {
- enum ActionType { Push, Pop };
+ enum ActionType { Push, Pop, Attribute };
ParsedAttributes &Attributes;
ActionType Action;
+ const IdentifierInfo *Namespace = nullptr;
ArrayRef<Token> Tokens;
PragmaAttributeInfo(ParsedAttributes &Attributes) : Attributes(Attributes) {}
@@ -1333,12 +1402,20 @@ void Parser::HandlePragmaAttribute() {
auto *Info = static_cast<PragmaAttributeInfo *>(Tok.getAnnotationValue());
if (Info->Action == PragmaAttributeInfo::Pop) {
ConsumeAnnotationToken();
- Actions.ActOnPragmaAttributePop(PragmaLoc);
+ Actions.ActOnPragmaAttributePop(PragmaLoc, Info->Namespace);
return;
}
// Parse the actual attribute with its arguments.
- assert(Info->Action == PragmaAttributeInfo::Push &&
+ assert((Info->Action == PragmaAttributeInfo::Push ||
+ Info->Action == PragmaAttributeInfo::Attribute) &&
"Unexpected #pragma attribute command");
+
+ if (Info->Action == PragmaAttributeInfo::Push && Info->Tokens.empty()) {
+ ConsumeAnnotationToken();
+ Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc, Info->Namespace);
+ return;
+ }
+
PP.EnterTokenStream(Info->Tokens, /*DisableMacroExpansion=*/false);
ConsumeAnnotationToken();
@@ -1485,8 +1562,12 @@ void Parser::HandlePragmaAttribute() {
// Consume the eof terminator token.
ConsumeToken();
- Actions.ActOnPragmaAttributePush(Attribute, PragmaLoc,
- std::move(SubjectMatchRules));
+ // Handle a mixed push/attribute by desugaring to a push, then an attribute.
+ if (Info->Action == PragmaAttributeInfo::Push)
+ Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc, Info->Namespace);
+
+ Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
+ std::move(SubjectMatchRules));
}
// #pragma GCC visibility comes in two variants:
@@ -2737,6 +2818,8 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
/// 'vectorize_width' '(' loop-hint-value ')'
/// 'interleave_count' '(' loop-hint-value ')'
/// 'unroll_count' '(' loop-hint-value ')'
+/// 'pipeline' '(' disable ')'
+/// 'pipeline_initiation_interval' '(' loop-hint-value ')'
///
/// loop-hint-keyword:
/// 'enable'
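A hypothetical use of the newly accepted pipeline options (software-pipelining hints; whether the target honors them is outside this parser change):

    void saxpy(int n, float a, const float *x, float *y) {
    #pragma clang loop pipeline_initiation_interval(4)
      for (int i = 0; i < n; ++i)
        y[i] = a * x[i] + y[i];
    }

    void plain_copy(int n, const int *src, int *dst) {
    #pragma clang loop pipeline(disable)
      for (int i = 0; i < n; ++i)
        dst[i] = src[i];
    }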
@@ -2796,6 +2879,8 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
.Case("vectorize_width", true)
.Case("interleave_count", true)
.Case("unroll_count", true)
+ .Case("pipeline", true)
+ .Case("pipeline_initiation_interval", true)
.Default(false);
if (!OptionValid) {
PP.Diag(Tok.getLocation(), diag::err_pragma_loop_invalid_option)
@@ -2844,6 +2929,10 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP,
/// #pragma unroll unroll-hint-value
/// #pragma unroll '(' unroll-hint-value ')'
/// #pragma nounroll
+/// #pragma unroll_and_jam
+/// #pragma unroll_and_jam unroll-hint-value
+/// #pragma unroll_and_jam '(' unroll-hint-value ')'
+/// #pragma nounroll_and_jam
///
/// unroll-hint-value:
/// constant-expression
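A sketch of the unroll_and_jam spellings documented above, applied to a hypothetical loop nest; the bare and value forms mirror #pragma unroll, and #pragma nounroll_and_jam disables the transformation:

    void scale(int n, int m, float *a, float s) {
    #pragma unroll_and_jam(4)
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
          a[i * m + j] *= s;
    }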
@@ -2868,9 +2957,10 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
// nounroll or unroll pragma without an argument.
Info->PragmaName = PragmaName;
Info->Option.startToken();
- } else if (PragmaName.getIdentifierInfo()->getName() == "nounroll") {
+ } else if (PragmaName.getIdentifierInfo()->getName() == "nounroll" ||
+ PragmaName.getIdentifierInfo()->getName() == "nounroll_and_jam") {
PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
- << "nounroll";
+ << PragmaName.getIdentifierInfo()->getName();
return;
} else {
// Unroll pragma with an argument: "#pragma unroll N" or
@@ -3041,10 +3131,22 @@ void PragmaForceCUDAHostDeviceHandler::HandlePragma(
///
/// The syntax is:
/// \code
-/// #pragma clang attribute push(attribute, subject-set)
+/// #pragma clang attribute push (attribute, subject-set)
+/// #pragma clang attribute push
+/// #pragma clang attribute (attribute, subject-set)
/// #pragma clang attribute pop
/// \endcode
///
+/// There are also 'namespace' variants of push and pop directives. The bare
+/// '#pragma clang attribute (attribute, subject-set)' version doesn't require a
+/// namespace, since it always applies attributes to the most recently pushed
+/// group, regardless of namespace.
+/// \code
+/// #pragma clang attribute namespace.push (attribute, subject-set)
+/// #pragma clang attribute namespace.push
+/// #pragma clang attribute namespace.pop
+/// \endcode
+///
/// The subject-set clause defines the set of declarations which receive the
/// attribute. Its exact syntax is described in the LanguageExtensions document
/// in Clang's documentation.
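A hedged sketch of the namespace, empty-push, and bare-attribute forms described above; the 'internal' namespace, the annotate attribute, and the apply_to = function subject set are illustrative choices:

    #pragma clang attribute internal.push
    #pragma clang attribute (__attribute__((annotate("internal"))), apply_to = function)
    void helper_a(void);
    void helper_b(void);
    #pragma clang attribute internal.pop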
@@ -3060,25 +3162,56 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
auto *Info = new (PP.getPreprocessorAllocator())
PragmaAttributeInfo(AttributesForPragmaAttribute);
- // Parse the 'push' or 'pop'.
- if (Tok.isNot(tok::identifier)) {
- PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_expected_push_pop);
- return;
+ // Parse the optional namespace followed by a period.
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->isStr("push") && !II->isStr("pop")) {
+ Info->Namespace = II;
+ PP.Lex(Tok);
+
+ if (!Tok.is(tok::period)) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_expected_period)
+ << II;
+ return;
+ }
+ PP.Lex(Tok);
+ }
}
- const auto *II = Tok.getIdentifierInfo();
- if (II->isStr("push"))
- Info->Action = PragmaAttributeInfo::Push;
- else if (II->isStr("pop"))
- Info->Action = PragmaAttributeInfo::Pop;
- else {
- PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_invalid_argument)
- << PP.getSpelling(Tok);
+
+ if (!Tok.isOneOf(tok::identifier, tok::l_paren)) {
+ PP.Diag(Tok.getLocation(),
+ diag::err_pragma_attribute_expected_push_pop_paren);
return;
}
- PP.Lex(Tok);
+
+ // Determine what action this pragma clang attribute represents.
+ if (Tok.is(tok::l_paren)) {
+ if (Info->Namespace) {
+ PP.Diag(Tok.getLocation(),
+ diag::err_pragma_attribute_namespace_on_attribute);
+ PP.Diag(Tok.getLocation(),
+ diag::note_pragma_attribute_namespace_on_attribute);
+ return;
+ }
+ Info->Action = PragmaAttributeInfo::Attribute;
+ } else {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("push"))
+ Info->Action = PragmaAttributeInfo::Push;
+ else if (II->isStr("pop"))
+ Info->Action = PragmaAttributeInfo::Pop;
+ else {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_attribute_invalid_argument)
+ << PP.getSpelling(Tok);
+ return;
+ }
+
+ PP.Lex(Tok);
+ }
// Parse the actual attribute.
- if (Info->Action == PragmaAttributeInfo::Push) {
+ if ((Info->Action == PragmaAttributeInfo::Push && Tok.isNot(tok::eod)) ||
+ Info->Action == PragmaAttributeInfo::Attribute) {
if (Tok.isNot(tok::l_paren)) {
PP.Diag(Tok.getLocation(), diag::err_expected) << tok::l_paren;
return;
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index deb10af4b17b..2974e6a245b0 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -15,10 +15,10 @@
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Parse/LoopHint.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
using namespace clang;
@@ -348,6 +348,11 @@ Retry:
ConsumeAnnotationToken();
return StmtError();
+ case tok::annot_pragma_fenv_access:
+ ProhibitAttributes(Attrs);
+ HandlePragmaFEnvAccess();
+ return StmtEmpty();
+
case tok::annot_pragma_opencl_extension:
ProhibitAttributes(Attrs);
HandlePragmaOpenCLExtension();
@@ -434,7 +439,7 @@ StmtResult Parser::ParseExprStatement() {
// Otherwise, eat the semicolon.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return Actions.ActOnExprStmt(Expr);
+ return Actions.ActOnExprStmt(Expr, isExprValueDiscarded());
}
/// ParseSEHTryBlockCommon
@@ -902,6 +907,9 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
case tok::annot_pragma_fp:
HandlePragmaFP();
break;
+ case tok::annot_pragma_fenv_access:
+ HandlePragmaFEnvAccess();
+ break;
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
break;
@@ -922,6 +930,44 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
}
+/// Consume any extra semi-colons resulting in null statements,
+/// returning true if any tok::semi were consumed.
+bool Parser::ConsumeNullStmt(StmtVector &Stmts) {
+ if (!Tok.is(tok::semi))
+ return false;
+
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ while (Tok.is(tok::semi) && !Tok.hasLeadingEmptyMacro() &&
+ Tok.getLocation().isValid() && !Tok.getLocation().isMacroID()) {
+ EndLoc = Tok.getLocation();
+
+ // Don't just ConsumeToken() this tok::semi; do store it in the AST.
+ StmtResult R = ParseStatementOrDeclaration(Stmts, ACK_Any);
+ if (R.isUsable())
+ Stmts.push_back(R.get());
+ }
+
+ // Did not consume any extra semi.
+ if (EndLoc.isInvalid())
+ return false;
+
+ Diag(StartLoc, diag::warn_null_statement)
+ << FixItHint::CreateRemoval(SourceRange(StartLoc, EndLoc));
+ return true;
+}
+
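ConsumeNullStmt keeps stray null statements in the AST while diagnosing them with a removal fix-it; a deliberately sloppy sketch:

    void tick(int *n) {
      ++*n;;                 /* the second ';' is an extra null statement */
      for (int i = 0; i < 3; ++i) {
      };                     /* so is the ';' after the block */
    }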
+bool Parser::isExprValueDiscarded() {
+ if (Actions.isCurCompoundStmtAStmtExpr()) {
+ // Look to see if the next two tokens close the statement expression;
+ // if so, this expression statement is the last statement in a
+ // statment expression.
+ return Tok.isNot(tok::r_brace) || NextToken().isNot(tok::r_paren);
+ }
+ return true;
+}
+
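isExprValueDiscarded keeps the final expression of a GNU statement expression from being flagged as a discarded value; a minimal sketch (GNU extension):

    int twice(int v) {
      /* 't * 2' is the value of the statement expression, so it is not a
         discarded-value expression; the earlier statements in the braces are. */
      return ({ int t = v; t * 2; });
    }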
/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
/// consume the '}' at the end of the block. It does not manipulate the scope
@@ -984,6 +1030,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
continue;
}
+ if (ConsumeNullStmt(Stmts))
+ continue;
+
StmtResult R;
if (Tok.isNot(tok::kw___extension__)) {
R = ParseStatementOrDeclaration(Stmts, ACK_Any);
@@ -1023,7 +1072,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
// Eat the semicolon at the end of stmt and convert the expr into a
// statement.
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- R = Actions.ActOnExprStmt(Res);
+ R = Actions.ActOnExprStmt(Res, isExprValueDiscarded());
}
}
@@ -1534,7 +1583,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus ||
- getLangOpts().ObjC1;
+ getLangOpts().ObjC;
// C99 6.8.5p5 - In C99, the for statement is a block. This is not
// the case for C90. Start the loop scope.
@@ -1562,11 +1611,11 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ExprResult Value;
- bool ForEach = false, ForRange = false;
+ bool ForEach = false;
StmtResult FirstPart;
Sema::ConditionResult SecondPart;
ExprResult Collection;
- ForRangeInit ForRangeInit;
+ ForRangeInfo ForRangeInfo;
FullExprArg ThirdPart(Actions);
if (Tok.is(tok::code_completion)) {
@@ -1580,10 +1629,15 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
+ SourceLocation EmptyInitStmtSemiLoc;
+
// Parse the first part of the for specifier.
if (Tok.is(tok::semi)) { // for (;
ProhibitAttributes(attrs);
// no first part, eat the ';'.
+ SourceLocation SemiLoc = Tok.getLocation();
+ if (!Tok.hasLeadingEmptyMacro() && !SemiLoc.isMacroID())
+ EmptyInitStmtSemiLoc = SemiLoc;
ConsumeToken();
} else if (getLangOpts().CPlusPlus && Tok.is(tok::identifier) &&
isForRangeIdentifier()) {
@@ -1592,20 +1646,19 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
SourceLocation Loc = ConsumeToken();
MaybeParseCXX11Attributes(attrs);
- ForRangeInit.ColonLoc = ConsumeToken();
+ ForRangeInfo.ColonLoc = ConsumeToken();
if (Tok.is(tok::l_brace))
- ForRangeInit.RangeExpr = ParseBraceInitializer();
+ ForRangeInfo.RangeExpr = ParseBraceInitializer();
else
- ForRangeInit.RangeExpr = ParseExpression();
+ ForRangeInfo.RangeExpr = ParseExpression();
Diag(Loc, diag::err_for_range_identifier)
<< ((getLangOpts().CPlusPlus11 && !getLangOpts().CPlusPlus17)
? FixItHint::CreateInsertion(Loc, "auto &&")
: FixItHint());
- FirstPart = Actions.ActOnCXXForRangeIdentifier(getCurScope(), Loc, Name,
- attrs, attrs.Range.getEnd());
- ForRange = true;
+ ForRangeInfo.LoopVar = Actions.ActOnCXXForRangeIdentifier(
+ getCurScope(), Loc, Name, attrs, attrs.Range.getEnd());
} else if (isForInitDeclaration()) { // for (int X = 4;
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1622,13 +1675,13 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy DG = ParseSimpleDeclaration(
DeclaratorContext::ForContext, DeclEnd, attrs, false,
- MightBeForRangeStmt ? &ForRangeInit : nullptr);
+ MightBeForRangeStmt ? &ForRangeInfo : nullptr);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
- if (ForRangeInit.ParsedForRangeDecl()) {
- Diag(ForRangeInit.ColonLoc, getLangOpts().CPlusPlus11 ?
+ if (ForRangeInfo.ParsedForRangeDecl()) {
+ Diag(ForRangeInfo.ColonLoc, getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_for_range : diag::ext_for_range);
-
- ForRange = true;
+ ForRangeInfo.LoopVar = FirstPart;
+ FirstPart = StmtResult();
} else if (Tok.is(tok::semi)) { // for (int x = 4;
ConsumeToken();
} else if ((ForEach = isTokIdentifier_in())) {
@@ -1655,8 +1708,16 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (!Value.isInvalid()) {
if (ForEach)
FirstPart = Actions.ActOnForEachLValueExpr(Value.get());
- else
- FirstPart = Actions.ActOnExprStmt(Value);
+ else {
+ // We already know this is not an init-statement within a for loop, so
+ // if we are parsing a C++11 range-based for loop, we should treat this
+ // expression statement as being a discarded value expression because
+ // we will err below. This way we do not warn on an unused expression
+ // that was an error in the first place, like with: for (expr : expr);
+ bool IsRangeBasedFor =
+ getLangOpts().CPlusPlus11 && !ForEach && Tok.is(tok::colon);
+ FirstPart = Actions.ActOnExprStmt(Value, !IsRangeBasedFor);
+ }
}
if (Tok.is(tok::semi)) {
@@ -1691,17 +1752,38 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// Parse the second part of the for specifier.
getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
- if (!ForEach && !ForRange && !SecondPart.isInvalid()) {
+ if (!ForEach && !ForRangeInfo.ParsedForRangeDecl() &&
+ !SecondPart.isInvalid()) {
// Parse the second part of the for specifier.
if (Tok.is(tok::semi)) { // for (...;;
// no second part.
} else if (Tok.is(tok::r_paren)) {
// missing both semicolons.
} else {
- if (getLangOpts().CPlusPlus)
+ if (getLangOpts().CPlusPlus) {
+ // C++2a: We've parsed an init-statement; we might have a
+ // for-range-declaration next.
+ bool MightBeForRangeStmt = !ForRangeInfo.ParsedForRangeDecl();
+ ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SecondPart =
- ParseCXXCondition(nullptr, ForLoc, Sema::ConditionKind::Boolean);
- else {
+ ParseCXXCondition(nullptr, ForLoc, Sema::ConditionKind::Boolean,
+ MightBeForRangeStmt ? &ForRangeInfo : nullptr);
+
+ if (ForRangeInfo.ParsedForRangeDecl()) {
+ Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
+ : ForRangeInfo.ColonLoc,
+ getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_for_range_init_stmt
+ : diag::ext_for_range_init_stmt)
+ << (FirstPart.get() ? FirstPart.get()->getSourceRange()
+ : SourceRange());
+ if (EmptyInitStmtSemiLoc.isValid()) {
+ Diag(EmptyInitStmtSemiLoc, diag::warn_empty_init_statement)
+ << /*for-loop*/ 2
+ << FixItHint::CreateRemoval(EmptyInitStmtSemiLoc);
+ }
+ }
+ } else {
ExprResult SecondExpr = ParseExpression();
if (SecondExpr.isInvalid())
SecondPart = Sema::ConditionError();
@@ -1711,7 +1793,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
Sema::ConditionKind::Boolean);
}
}
+ }
+ // Parse the third part of the for statement.
+ if (!ForEach && !ForRangeInfo.ParsedForRangeDecl()) {
if (Tok.isNot(tok::semi)) {
if (!SecondPart.isInvalid())
Diag(Tok, diag::err_expected_semi_for);
@@ -1724,7 +1809,6 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ConsumeToken();
}
- // Parse the third part of the for specifier.
if (Tok.isNot(tok::r_paren)) { // for (...;...;)
ExprResult Third = ParseExpression();
// FIXME: The C++11 standard doesn't actually say that this is a
@@ -1737,7 +1821,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// C++ Coroutines [stmt.iter]:
// 'co_await' can only be used for a range-based for statement.
- if (CoawaitLoc.isValid() && !ForRange) {
+ if (CoawaitLoc.isValid() && !ForRangeInfo.ParsedForRangeDecl()) {
Diag(CoawaitLoc, diag::err_for_co_await_not_range_for);
CoawaitLoc = SourceLocation();
}
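A source-level sketch of the C++2a for-range init-statement that the new path above parses (diagnosed as an extension in earlier language modes; names are made up):

    #include <vector>

    int count_odd(const std::vector<int> &src) {
      int odd = 0;
      // C++2a: an init-statement may precede the for-range-declaration.
      for (auto v = src; int x : v)
        odd += x & 1;
      return odd;
    }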
@@ -1748,12 +1832,12 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
StmtResult ForRangeStmt;
StmtResult ForEachStmt;
- if (ForRange) {
+ if (ForRangeInfo.ParsedForRangeDecl()) {
ExprResult CorrectedRange =
- Actions.CorrectDelayedTyposInExpr(ForRangeInit.RangeExpr.get());
+ Actions.CorrectDelayedTyposInExpr(ForRangeInfo.RangeExpr.get());
ForRangeStmt = Actions.ActOnCXXForRangeStmt(
getCurScope(), ForLoc, CoawaitLoc, FirstPart.get(),
- ForRangeInit.ColonLoc, CorrectedRange.get(),
+ ForRangeInfo.LoopVar.get(), ForRangeInfo.ColonLoc, CorrectedRange.get(),
T.getCloseLocation(), Sema::BFRK_Build);
// Similarly, we need to do the semantic analysis for a for-range
@@ -1808,7 +1892,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return Actions.FinishObjCForCollectionStmt(ForEachStmt.get(),
Body.get());
- if (ForRange)
+ if (ForRangeInfo.ParsedForRangeDecl())
return Actions.FinishCXXForRangeStmt(ForRangeStmt.get(), Body.get());
return Actions.ActOnForStmt(ForLoc, T.getOpenLocation(), FirstPart.get(),
@@ -1897,10 +1981,11 @@ StmtResult Parser::ParseReturnStatement() {
if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus) {
R = ParseInitializer();
if (R.isUsable())
- Diag(R.get()->getLocStart(), getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_generalized_initializer_lists :
- diag::ext_generalized_initializer_lists)
- << R.get()->getSourceRange();
+ Diag(R.get()->getBeginLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_generalized_initializer_lists
+ : diag::ext_generalized_initializer_lists)
+ << R.get()->getSourceRange();
} else
R = ParseExpression();
if (R.isInvalid()) {
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 063f7ccea320..e0a7cc6e856d 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -425,7 +425,9 @@ bool Parser::isStartOfTemplateTypeParameter() {
}
}
- if (Tok.isNot(tok::kw_typename))
+ // 'typedef' is a reasonably-common typo/thinko for 'typename', and is
+ // ill-formed otherwise.
+ if (Tok.isNot(tok::kw_typename) && Tok.isNot(tok::kw_typedef))
return false;
// C++ [temp.param]p2:
@@ -448,6 +450,13 @@ bool Parser::isStartOfTemplateTypeParameter() {
case tok::ellipsis:
return true;
+ case tok::kw_typename:
+ case tok::kw_typedef:
+ case tok::kw_class:
+ // These indicate that a comma was missed after a type parameter, not that
+ // we have found a non-type parameter.
+ return true;
+
default:
return false;
}
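Two deliberately ill-formed sketches exercised by the recovery above: 'typedef' written for 'typename' now gets a fix-it, and a missing ',' between type parameters is classified as a type parameter rather than misparsed as a non-type parameter:

    // error + fix-it: 'typedef' is recovered as 'typename'.
    template <typedef T> struct Wrapper { T value; };

    // error: missing ',' between type parameters, but disambiguated correctly.
    template <typename T typename U> struct Pair;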
@@ -469,26 +478,25 @@ bool Parser::isStartOfTemplateTypeParameter() {
/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
/// = id-expression
NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
- if (isStartOfTemplateTypeParameter())
- return ParseTypeParameter(Depth, Position);
-
- if (Tok.is(tok::kw_template))
- return ParseTemplateTemplateParameter(Depth, Position);
+ if (isStartOfTemplateTypeParameter()) {
+ // Is there just a typo in the input code? ('typedef' instead of 'typename')
+ if (Tok.is(tok::kw_typedef)) {
+ Diag(Tok.getLocation(), diag::err_expected_template_parameter);
- // Is there just a typo in the input code? ('typedef' instead of 'typename')
- if (Tok.is(tok::kw_typedef)) {
- Diag(Tok.getLocation(), diag::err_expected_template_parameter);
-
- Diag(Tok.getLocation(), diag::note_meant_to_use_typename)
- << FixItHint::CreateReplacement(CharSourceRange::getCharRange(
- Tok.getLocation(), Tok.getEndLoc()),
- "typename");
+ Diag(Tok.getLocation(), diag::note_meant_to_use_typename)
+ << FixItHint::CreateReplacement(CharSourceRange::getCharRange(
+ Tok.getLocation(), Tok.getEndLoc()),
+ "typename");
- Tok.setKind(tok::kw_typename);
+ Tok.setKind(tok::kw_typename);
+ }
return ParseTypeParameter(Depth, Position);
}
+ if (Tok.is(tok::kw_template))
+ return ParseTemplateTemplateParameter(Depth, Position);
+
// If it's none of the above, then it must be a parameter declaration.
// NOTE: This will pick up errors in the closure of the template parameter
// list (e.g., template < ; Check here to implement >> style closures.
@@ -938,7 +946,9 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
bool Invalid = false;
{
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater))
+ if (!Tok.isOneOf(tok::greater, tok::greatergreater,
+ tok::greatergreatergreater, tok::greaterequal,
+ tok::greatergreaterequal))
Invalid = ParseTemplateArgumentList(TemplateArgs);
if (Invalid) {
@@ -1371,26 +1381,37 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
SmallVector<ParseScope*, 4> TemplateParamScopeStack;
- // Get the list of DeclContexts to reenter.
- SmallVector<DeclContext*, 4> DeclContextsToReenter;
+ // Get the list of DeclContexts to reenter. For inline methods, we only want
+ // to push the DeclContext of the outermost class. This matches the way the
+ // parser normally parses bodies of inline methods when the outermost class is
+ // complete.
+ struct ContainingDC {
+ ContainingDC(DeclContext *DC, bool ShouldPush) : Pair(DC, ShouldPush) {}
+ llvm::PointerIntPair<DeclContext *, 1, bool> Pair;
+ DeclContext *getDC() { return Pair.getPointer(); }
+ bool shouldPushDC() { return Pair.getInt(); }
+ };
+ SmallVector<ContainingDC, 4> DeclContextsToReenter;
DeclContext *DD = FunD;
+ DeclContext *NextContaining = Actions.getContainingDC(DD);
while (DD && !DD->isTranslationUnit()) {
- DeclContextsToReenter.push_back(DD);
+ bool ShouldPush = DD == NextContaining;
+ DeclContextsToReenter.push_back({DD, ShouldPush});
+ if (ShouldPush)
+ NextContaining = Actions.getContainingDC(DD);
DD = DD->getLexicalParent();
}
// Reenter template scopes from outermost to innermost.
- SmallVectorImpl<DeclContext *>::reverse_iterator II =
- DeclContextsToReenter.rbegin();
- for (; II != DeclContextsToReenter.rend(); ++II) {
- TemplateParamScopeStack.push_back(new ParseScope(this,
- Scope::TemplateParamScope));
- unsigned NumParamLists =
- Actions.ActOnReenterTemplateScope(getCurScope(), cast<Decl>(*II));
+ for (ContainingDC CDC : reverse(DeclContextsToReenter)) {
+ TemplateParamScopeStack.push_back(
+ new ParseScope(this, Scope::TemplateParamScope));
+ unsigned NumParamLists = Actions.ActOnReenterTemplateScope(
+ getCurScope(), cast<Decl>(CDC.getDC()));
CurTemplateDepthTracker.addDepth(NumParamLists);
- if (*II != FunD) {
+ if (CDC.shouldPushDC()) {
TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
- Actions.PushDeclContext(Actions.getCurScope(), *II);
+ Actions.PushDeclContext(Actions.getCurScope(), CDC.getDC());
}
}
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index dfd1f8c3b2e6..de39e0675fdb 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -159,7 +159,7 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
ConsumeToken();
break;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case tok::kw_typeof:
case tok::kw___attribute:
case tok::kw___underlying_type: {
@@ -219,11 +219,11 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
case tok::annot_cxxscope:
ConsumeAnnotationToken();
- // Fall through.
+ LLVM_FALLTHROUGH;
default:
ConsumeAnyToken();
- if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ if (getLangOpts().ObjC && Tok.is(tok::less))
return TryParseProtocolQualifiers();
break;
}
@@ -345,22 +345,55 @@ struct Parser::ConditionDeclarationOrInitStatementState {
bool CanBeExpression = true;
bool CanBeCondition = true;
bool CanBeInitStatement;
+ bool CanBeForRangeDecl;
+
+ ConditionDeclarationOrInitStatementState(Parser &P, bool CanBeInitStatement,
+ bool CanBeForRangeDecl)
+ : P(P), CanBeInitStatement(CanBeInitStatement),
+ CanBeForRangeDecl(CanBeForRangeDecl) {}
- ConditionDeclarationOrInitStatementState(Parser &P, bool CanBeInitStatement)
- : P(P), CanBeInitStatement(CanBeInitStatement) {}
+ bool resolved() {
+ return CanBeExpression + CanBeCondition + CanBeInitStatement +
+ CanBeForRangeDecl < 2;
+ }
void markNotExpression() {
CanBeExpression = false;
- if (CanBeCondition && CanBeInitStatement) {
+ if (!resolved()) {
// FIXME: Unify the parsing codepaths for condition variables and
// simple-declarations so that we don't need to eagerly figure out which
// kind we have here. (Just parse init-declarators until we reach a
// semicolon or right paren.)
RevertingTentativeParsingAction PA(P);
- P.SkipUntil(tok::r_paren, tok::semi, StopBeforeMatch);
+ if (CanBeForRangeDecl) {
+ // Skip until we hit a ')', ';', or a ':' with no matching '?'.
+ // The final case is a for range declaration, the rest are not.
+ while (true) {
+ unsigned QuestionColonDepth = 0;
+ P.SkipUntil({tok::r_paren, tok::semi, tok::question, tok::colon},
+ StopBeforeMatch);
+ if (P.Tok.is(tok::question))
+ ++QuestionColonDepth;
+ else if (P.Tok.is(tok::colon)) {
+ if (QuestionColonDepth)
+ --QuestionColonDepth;
+ else {
+ CanBeCondition = CanBeInitStatement = false;
+ return;
+ }
+ } else {
+ CanBeForRangeDecl = false;
+ break;
+ }
+ P.ConsumeToken();
+ }
+ } else {
+ // Just skip until we hit a ')' or ';'.
+ P.SkipUntil(tok::r_paren, tok::semi, StopBeforeMatch);
+ }
if (P.Tok.isNot(tok::r_paren))
- CanBeCondition = false;
+ CanBeCondition = CanBeForRangeDecl = false;
if (P.Tok.isNot(tok::semi))
CanBeInitStatement = false;
}
@@ -368,28 +401,36 @@ struct Parser::ConditionDeclarationOrInitStatementState {
bool markNotCondition() {
CanBeCondition = false;
- return !CanBeInitStatement || !CanBeExpression;
+ return resolved();
+ }
+
+ bool markNotForRangeDecl() {
+ CanBeForRangeDecl = false;
+ return resolved();
}
bool update(TPResult IsDecl) {
switch (IsDecl) {
case TPResult::True:
markNotExpression();
- return true;
+ assert(resolved() && "can't continue after tentative parsing bails out");
+ break;
case TPResult::False:
- CanBeCondition = CanBeInitStatement = false;
- return true;
+ CanBeCondition = CanBeInitStatement = CanBeForRangeDecl = false;
+ break;
case TPResult::Ambiguous:
- return false;
+ break;
case TPResult::Error:
- CanBeExpression = CanBeCondition = CanBeInitStatement = false;
- return true;
+ CanBeExpression = CanBeCondition = CanBeInitStatement =
+ CanBeForRangeDecl = false;
+ break;
}
- llvm_unreachable("unknown tentative parse result");
+ return resolved();
}
ConditionOrInitStatement result() const {
- assert(CanBeExpression + CanBeCondition + CanBeInitStatement < 2 &&
+ assert(CanBeExpression + CanBeCondition + CanBeInitStatement +
+ CanBeForRangeDecl < 2 &&
"result called but not yet resolved");
if (CanBeExpression)
return ConditionOrInitStatement::Expression;
@@ -397,6 +438,8 @@ struct Parser::ConditionDeclarationOrInitStatementState {
return ConditionOrInitStatement::ConditionDecl;
if (CanBeInitStatement)
return ConditionOrInitStatement::InitStmtDecl;
+ if (CanBeForRangeDecl)
+ return ConditionOrInitStatement::ForRangeDecl;
return ConditionOrInitStatement::Error;
}
};
@@ -419,8 +462,10 @@ struct Parser::ConditionDeclarationOrInitStatementState {
/// to the ';' to disambiguate cases like 'int(x))' (an expression) from
/// 'int(x);' (a simple-declaration in an init-statement).
Parser::ConditionOrInitStatement
-Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement) {
- ConditionDeclarationOrInitStatementState State(*this, CanBeInitStatement);
+Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement,
+ bool CanBeForRangeDecl) {
+ ConditionDeclarationOrInitStatementState State(*this, CanBeInitStatement,
+ CanBeForRangeDecl);
if (State.update(isCXXDeclarationSpecifier()))
return State.result();
@@ -447,11 +492,19 @@ Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement) {
return State.result();
}
+ // A colon here identifies a for-range declaration.
+ if (State.CanBeForRangeDecl && Tok.is(tok::colon))
+ return ConditionOrInitStatement::ForRangeDecl;
+
// At this point, it can't be a condition any more, because a condition
// must have a brace-or-equal-initializer.
if (State.markNotCondition())
return State.result();
+ // Likewise, it can't be a for-range declaration any more.
+ if (State.markNotForRangeDecl())
+ return State.result();
+
// A parenthesized initializer could be part of an expression or a
// simple-declaration.
if (Tok.is(tok::l_paren)) {
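
As a rough sketch (hypothetical C++17 inputs, not from this patch's tests), the token that ends the declarator-plus-initializer now selects among the possible results like so:

  #include <vector>

  void result_demo(std::vector<int> v, int n) {
    if (int *p = v.data())   // '=' init then ')'    -> ConditionDecl
      (void)p;
    if (int i = n; i > 0)    // '=' init then ';'    -> InitStmtDecl (C++17)
      (void)i;
    for (auto x : v)         // ':' after declarator -> ForRangeDecl
      (void)x;
  }
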
@@ -596,7 +649,7 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
return CAK_NotAttributeSpecifier;
// No tentative parsing if we don't need to look for ']]' or a lambda.
- if (!Disambiguate && !getLangOpts().ObjC1)
+ if (!Disambiguate && !getLangOpts().ObjC)
return CAK_AttributeSpecifier;
RevertingTentativeParsingAction PA(*this);
@@ -605,7 +658,7 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
ConsumeBracket();
// Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
- if (!getLangOpts().ObjC1) {
+ if (!getLangOpts().ObjC) {
ConsumeBracket();
bool IsAttribute = SkipUntil(tok::r_square);
@@ -1107,8 +1160,8 @@ public:
// Reject any candidate that only resolves to instance members since they
// aren't viable as standalone identifiers instead of member references.
if (Candidate.isResolved() && !Candidate.isKeyword() &&
- std::all_of(Candidate.begin(), Candidate.end(),
- [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
+ llvm::all_of(Candidate,
+ [](NamedDecl *ND) { return ND->isCXXInstanceMember(); }))
return false;
return CorrectionCandidateCallback::ValidateCandidate(Candidate);
@@ -1233,7 +1286,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
const Token &Next = NextToken();
// In 'foo bar', 'foo' is always a type name outside of Objective-C.
- if (!getLangOpts().ObjC1 && Next.is(tok::identifier))
+ if (!getLangOpts().ObjC && Next.is(tok::identifier))
return TPResult::True;
if (Next.isNot(tok::coloncolon) && Next.isNot(tok::less)) {
@@ -1299,8 +1352,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
if (Next.isOneOf(tok::kw_new, // ::new
tok::kw_delete)) // ::delete
return TPResult::False;
+ LLVM_FALLTHROUGH;
}
- // Fall through.
case tok::kw___super:
case tok::kw_decltype:
// Annotate typenames and C++ scope specifiers. If we get one, just
@@ -1506,7 +1559,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::annot_typename:
case_typename:
// In Objective-C, we might have a protocol-qualified type.
- if (getLangOpts().ObjC1 && NextToken().is(tok::less)) {
+ if (getLangOpts().ObjC && NextToken().is(tok::less)) {
// Tentatively parse the protocol qualifiers.
RevertingTentativeParsingAction PA(*this);
ConsumeAnyToken(); // The type token
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index c3085654f529..a93db799f8fe 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -341,7 +341,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
case tok::semi:
if (HasFlagsSet(Flags, StopAtSemi))
return false;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
default:
// Skip this token.
ConsumeAnyToken();
@@ -443,7 +443,7 @@ void Parser::Initialize() {
// Initialization for Objective-C context sensitive keywords recognition.
// Referenced in Parser::ParseObjCTypeQualifierList.
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
@@ -674,6 +674,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_fp_contract:
HandlePragmaFPContract();
return nullptr;
+ case tok::annot_pragma_fenv_access:
+ HandlePragmaFEnvAccess();
+ return nullptr;
case tok::annot_pragma_fp:
HandlePragmaFP();
break;
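
A minimal sketch of the file-scope construct behind the new annotation token (assuming the STDC pragma handler is registered; the snippet is not from this patch):

  #pragma STDC FENV_ACCESS ON   // becomes annot_pragma_fenv_access at file scope

  double scaled(double x) {
    return x * 0.1;
  }
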
@@ -744,7 +747,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
return ParseObjCAtDirectives(attrs);
case tok::minus:
case tok::plus:
- if (!getLangOpts().ObjC1) {
+ if (!getLangOpts().ObjC) {
Diag(Tok, diag::err_expected_external_declaration);
ConsumeToken();
return nullptr;
@@ -975,7 +978,7 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
// attributes here, no types, etc.
- if (getLangOpts().ObjC2 && Tok.is(tok::at)) {
+ if (getLangOpts().ObjC && Tok.is(tok::at)) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
!Tok.isObjCAtKeyword(tok::objc_protocol)) {
@@ -1519,7 +1522,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
// Look up and classify the identifier. We don't perform any typo-correction
// after a scope specifier, because in general we can't recover from typos
- // there (eg, after correcting 'A::tempalte B<X>::C' [sic], we would need to
+ // there (eg, after correcting 'A::template B<X>::C' [sic], we would need to
// jump back into scope specifier parsing).
Sema::NameClassification Classification = Actions.ClassifyName(
getCurScope(), SS, Name, NameLoc, Next, IsAddressOfOperand,
@@ -1551,7 +1554,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
ParsedType Ty = Classification.getType();
- if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ if (getLangOpts().ObjC && NextToken().is(tok::less) &&
(Ty.get()->isObjCObjectType() ||
Ty.get()->isObjCObjectPointerType())) {
// Consume the name.
@@ -1591,7 +1594,7 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
AnnotateScopeToken(SS, !WasScopeAnnotation);
return ANK_TemplateName;
}
- // Fall through.
+ LLVM_FALLTHROUGH;
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate: {
// We have a type, variable or function template followed by '<'.
@@ -1778,7 +1781,7 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
/// An Objective-C object type followed by '<' is a specialization of
/// a parameterized class type or a protocol-qualified type.
- if (getLangOpts().ObjC1 && NextToken().is(tok::less) &&
+ if (getLangOpts().ObjC && NextToken().is(tok::less) &&
(Ty.get()->isObjCObjectType() ||
Ty.get()->isObjCObjectPointerType())) {
// Consume the name.
@@ -1964,6 +1967,10 @@ void Parser::CodeCompleteMacroArgument(IdentifierInfo *Macro,
ArgumentIndex);
}
+void Parser::CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) {
+ Actions.CodeCompleteIncludedFile(Dir, IsAngled);
+}
+
void Parser::CodeCompleteNaturalLanguage() {
Actions.CodeCompleteNaturalLanguage();
}
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 12d7a16a2fc8..2088d4571aad 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -477,7 +477,7 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
// Chop off the L, u, U or 8 prefix
++TokOffs;
--TokLen;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
case tok::string_literal:
// FIXME: Exclude the optional ud-suffix from the highlighted range.
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
index 5bc79f3eddc9..e3b47a1c52f8 100644
--- a/lib/Rewrite/RewriteRope.cpp
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -59,7 +59,7 @@ using namespace clang;
///
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
-/// RopePieces contatenated.
+/// RopePieces concatenated.
/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
/// up to '2*WidthFactor' other nodes in the tree.
@@ -128,7 +128,7 @@ namespace {
/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
/// nodes. This directly represents a chunk of the string with those
- /// RopePieces contatenated. Since this is a B+Tree, all values (in this case
+ /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
/// instances of RopePiece) are stored in leaves like this. To make iteration
/// over the leaves efficient, they maintain a singly linked list through the
/// NextLeaf field. This allows the B+Tree forward iterator to be constant
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index ed240f4ed292..c818d40c7771 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -114,7 +114,7 @@ static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
//
// Note that this is also a performance optimization. Analyzing
// headers many times can be expensive.
- if (!S.getSourceManager().isInMainFile(AC.getDecl()->getLocStart()))
+ if (!S.getSourceManager().isInMainFile(AC.getDecl()->getBeginLoc()))
return;
UnreachableCodeHandler UC(S);
@@ -252,7 +252,7 @@ static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD,
// Emit diagnostic if a recursive function call is detected for all paths.
if (checkForRecursiveFunctionCall(FD, cfg))
- S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function);
+ S.Diag(Body->getBeginLoc(), diag::warn_infinite_recursive_function);
}
//===----------------------------------------------------------------------===//
@@ -651,7 +651,7 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
// Short circuit for compilation speed.
if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
return;
- SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd();
+ SourceLocation LBrace = Body->getBeginLoc(), RBrace = Body->getEndLoc();
auto EmitDiag = [&](SourceLocation Loc, unsigned DiagID) {
if (IsCoroutine)
S.Diag(Loc, DiagID) << FSI->CoroutinePromise->getType();
@@ -748,10 +748,10 @@ static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
return false;
// Don't suggest a fixit inside macros.
- if (VD->getLocEnd().isMacroID())
+ if (VD->getEndLoc().isMacroID())
return false;
- SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
+ SourceLocation Loc = S.getLocForEndOfToken(VD->getEndLoc());
// Suggest possible initialization (if any).
std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
@@ -771,19 +771,17 @@ static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then,
if (CondVal) {
// If condition is always true, remove all but the 'then'.
Fixit1 = FixItHint::CreateRemoval(
- CharSourceRange::getCharRange(If->getLocStart(),
- Then->getLocStart()));
+ CharSourceRange::getCharRange(If->getBeginLoc(), Then->getBeginLoc()));
if (Else) {
- SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getLocEnd());
- Fixit2 = FixItHint::CreateRemoval(
- SourceRange(ElseKwLoc, Else->getLocEnd()));
+ SourceLocation ElseKwLoc = S.getLocForEndOfToken(Then->getEndLoc());
+ Fixit2 =
+ FixItHint::CreateRemoval(SourceRange(ElseKwLoc, Else->getEndLoc()));
}
} else {
// If condition is always false, remove all but the 'else'.
if (Else)
- Fixit1 = FixItHint::CreateRemoval(
- CharSourceRange::getCharRange(If->getLocStart(),
- Else->getLocStart()));
+ Fixit1 = FixItHint::CreateRemoval(CharSourceRange::getCharRange(
+ If->getBeginLoc(), Else->getBeginLoc()));
else
Fixit1 = FixItHint::CreateRemoval(If->getSourceRange());
}
@@ -797,7 +795,7 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
switch (Use.getKind()) {
case UninitUse::Always:
- S.Diag(Use.getUser()->getLocStart(), diag::warn_uninit_var)
+ S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_var)
<< VD->getDeclName() << IsCapturedByBlock
<< Use.getUser()->getSourceRange();
return;
@@ -809,8 +807,8 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
<< (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
<< const_cast<DeclContext*>(VD->getLexicalDeclContext())
<< VD->getSourceRange();
- S.Diag(Use.getUser()->getLocStart(), diag::note_uninit_var_use)
- << IsCapturedByBlock << Use.getUser()->getSourceRange();
+ S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use)
+ << IsCapturedByBlock << Use.getUser()->getSourceRange();
return;
case UninitUse::Maybe:
@@ -880,8 +878,8 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
if ((BO->getOpcode() == BO_LAnd && I->Output) ||
(BO->getOpcode() == BO_LOr && !I->Output))
// true && y -> y, false || y -> y.
- Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(),
- BO->getOperatorLoc()));
+ Fixit1 = FixItHint::CreateRemoval(
+ SourceRange(BO->getBeginLoc(), BO->getOperatorLoc()));
else
// false && y -> false, true || y -> true.
Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
@@ -943,8 +941,8 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
<< VD->getDeclName() << IsCapturedByBlock << DiagKind
<< Str << I->Output << Range;
- S.Diag(User->getLocStart(), diag::note_uninit_var_use)
- << IsCapturedByBlock << User->getSourceRange();
+ S.Diag(User->getBeginLoc(), diag::note_uninit_var_use)
+ << IsCapturedByBlock << User->getSourceRange();
if (RemoveDiagKind != -1)
S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
<< RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;
@@ -953,7 +951,7 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
}
if (!Diagnosed)
- S.Diag(Use.getUser()->getLocStart(), diag::warn_maybe_uninit_var)
+ S.Diag(Use.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var)
<< VD->getDeclName() << IsCapturedByBlock
<< Use.getUser()->getSourceRange();
}
@@ -985,9 +983,8 @@ static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
ContainsReference CR(S.Context, DRE);
CR.Visit(Initializer);
if (CR.doesContainReference()) {
- S.Diag(DRE->getLocStart(),
- diag::warn_uninit_self_reference_in_init)
- << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
+ S.Diag(DRE->getBeginLoc(), diag::warn_uninit_self_reference_in_init)
+ << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
return true;
}
}
@@ -996,9 +993,9 @@ static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
} else {
const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
- S.Diag(BE->getLocStart(),
+ S.Diag(BE->getBeginLoc(),
diag::warn_uninit_byref_blockvar_captured_by_block)
- << VD->getDeclName();
+ << VD->getDeclName();
else
DiagUninitUse(S, VD, Use, true);
}
@@ -1007,8 +1004,8 @@ static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
// the initializer of that declaration & we didn't already suggest
// an initialization fixit.
if (!SuggestInitializationFixit(S, VD))
- S.Diag(VD->getLocStart(), diag::note_var_declared_here)
- << VD->getDeclName();
+ S.Diag(VD->getBeginLoc(), diag::note_var_declared_here)
+ << VD->getDeclName();
return true;
}
@@ -1098,7 +1095,7 @@ namespace {
// attribute in template instantiations as it may not be
// unreachable in all instantiations of the template.
if (!IsTemplateInstantiation)
- S.Diag(AS->getLocStart(),
+ S.Diag(AS->getBeginLoc(),
diag::warn_fallthrough_attr_unreachable);
markFallthroughVisited(AS);
++AnnotatedCnt;
@@ -1156,7 +1153,12 @@ namespace {
bool TraverseDecl(Decl *D) { return true; }
// We analyze lambda bodies separately. Skip them here.
- bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
+ bool TraverseLambdaExpr(LambdaExpr *LE) {
+ // Traverse the captures, but not the body.
+ for (const auto &C : zip(LE->captures(), LE->capture_inits()))
+ TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
+ return true;
+ }
private:
@@ -1266,12 +1268,12 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
IsTemplateInstantiation))
continue;
- S.Diag(Label->getLocStart(),
- PerFunction ? diag::warn_unannotated_fallthrough_per_function
- : diag::warn_unannotated_fallthrough);
+ S.Diag(Label->getBeginLoc(),
+ PerFunction ? diag::warn_unannotated_fallthrough_per_function
+ : diag::warn_unannotated_fallthrough);
if (!AnnotatedCnt) {
- SourceLocation L = Label->getLocStart();
+ SourceLocation L = Label->getBeginLoc();
if (L.isMacroID())
continue;
if (S.getLangOpts().CPlusPlus11) {
@@ -1297,7 +1299,7 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
}
for (const auto *F : FM.getFallthroughStmts())
- S.Diag(F->getLocStart(), diag::err_fallthrough_attr_invalid_placement);
+ S.Diag(F->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement);
}
static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
@@ -1312,11 +1314,10 @@ static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
case Stmt::ObjCForCollectionStmtClass:
return true;
case Stmt::DoStmtClass: {
- const Expr *Cond = cast<DoStmt>(S)->getCond();
- llvm::APSInt Val;
- if (!Cond->EvaluateAsInt(Val, Ctx))
+ Expr::EvalResult Result;
+ if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
return true;
- return Val.getBoolValue();
+ return Result.Val.getInt().getBoolValue();
}
default:
break;
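
A self-contained sketch of the updated Expr::EvaluateAsInt convention used above; the helper name is hypothetical and not part of this patch:

  #include "clang/AST/Expr.h"
  using namespace clang;

  static bool conditionIsTrue(const Expr *Cond, const ASTContext &Ctx) {
    Expr::EvalResult Result;                    // holds an APValue, not a bare APSInt
    if (!Cond->EvaluateAsInt(Result, Ctx))
      return true;                              // not a constant; assume it may loop
    return Result.Val.getInt().getBoolValue();
  }
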
@@ -1394,11 +1395,11 @@ static void diagnoseRepeatedUseOfWeak(Sema &S,
// Sort by first use so that we emit the warnings in a deterministic order.
SourceManager &SM = S.getSourceManager();
- llvm::sort(UsesByStmt.begin(), UsesByStmt.end(),
+ llvm::sort(UsesByStmt,
[&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
- return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(),
- RHS.first->getLocStart());
- });
+ return SM.isBeforeInTranslationUnit(LHS.first->getBeginLoc(),
+ RHS.first->getBeginLoc());
+ });
// Classify the current code body for better warning text.
// This enum should stay in sync with the cases in
@@ -1467,15 +1468,15 @@ static void diagnoseRepeatedUseOfWeak(Sema &S,
continue;
// Show the first time the object was read.
- S.Diag(FirstRead->getLocStart(), DiagKind)
- << int(ObjectKind) << KeyProp << int(FunctionKind)
- << FirstRead->getSourceRange();
+ S.Diag(FirstRead->getBeginLoc(), DiagKind)
+ << int(ObjectKind) << KeyProp << int(FunctionKind)
+ << FirstRead->getSourceRange();
// Print all the other accesses as notes.
for (const auto &Use : Uses) {
if (Use.getUseExpr() == FirstRead)
continue;
- S.Diag(Use.getUseExpr()->getLocStart(),
+ S.Diag(Use.getUseExpr()->getBeginLoc(),
diag::note_arc_weak_also_accessed_here)
<< Use.getUseExpr()->getSourceRange();
}
@@ -1538,7 +1539,7 @@ public:
// Prefer a more confident report over a less confident one.
if (a.getKind() != b.getKind())
return a.getKind() > b.getKind();
- return a.getUser()->getLocStart() < b.getUser()->getLocStart();
+ return a.getUser()->getBeginLoc() < b.getUser()->getBeginLoc();
});
for (const auto &U : *vec) {
@@ -1605,7 +1606,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
OptionalNotes getNotes() const {
if (Verbose && CurrentFunction) {
- PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
S.PDiag(diag::note_thread_warning_in_fun)
<< CurrentFunction);
return OptionalNotes(1, FNote);
@@ -1616,7 +1617,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
OptionalNotes ONS(1, Note);
if (Verbose && CurrentFunction) {
- PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
S.PDiag(diag::note_thread_warning_in_fun)
<< CurrentFunction);
ONS.push_back(std::move(FNote));
@@ -1630,7 +1631,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
ONS.push_back(Note1);
ONS.push_back(Note2);
if (Verbose && CurrentFunction) {
- PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(),
+ PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
S.PDiag(diag::note_thread_warning_in_fun)
<< CurrentFunction);
ONS.push_back(std::move(FNote));
@@ -2068,11 +2069,11 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
}
// Install the logical handler for -Wtautological-overlap-compare
- std::unique_ptr<LogicalErrorHandler> LEH;
+ llvm::Optional<LogicalErrorHandler> LEH;
if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
- D->getLocStart())) {
- LEH.reset(new LogicalErrorHandler(S));
- AC.getCFGBuildOptions().Observer = LEH.get();
+ D->getBeginLoc())) {
+ LEH.emplace(S);
+ AC.getCFGBuildOptions().Observer = &*LEH;
}
// Emit delayed diagnostics.
@@ -2145,11 +2146,11 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// Check for thread safety violations
if (P.enableThreadSafetyAnalysis) {
SourceLocation FL = AC.getDecl()->getLocation();
- SourceLocation FEL = AC.getDecl()->getLocEnd();
+ SourceLocation FEL = AC.getDecl()->getEndLoc();
threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
- if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getLocStart()))
+ if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getBeginLoc()))
Reporter.setIssueBetaWarnings(true);
- if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart()))
+ if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getBeginLoc()))
Reporter.setVerbose(true);
threadSafety::runThreadSafetyAnalysis(AC, Reporter,
@@ -2164,9 +2165,9 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
Analyzer.run(AC);
}
- if (!Diags.isIgnored(diag::warn_uninit_var, D->getLocStart()) ||
- !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getLocStart()) ||
- !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getLocStart())) {
+ if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc())) {
if (CFG *cfg = AC.getCFG()) {
UninitValsDiagReporter reporter(S);
UninitVariablesAnalysisStats stats;
@@ -2189,29 +2190,29 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
}
bool FallThroughDiagFull =
- !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getLocStart());
+ !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
bool FallThroughDiagPerFunction = !Diags.isIgnored(
- diag::warn_unannotated_fallthrough_per_function, D->getLocStart());
+ diag::warn_unannotated_fallthrough_per_function, D->getBeginLoc());
if (FallThroughDiagFull || FallThroughDiagPerFunction ||
fscope->HasFallthroughStmt) {
DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);
}
if (S.getLangOpts().ObjCWeak &&
- !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getLocStart()))
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getBeginLoc()))
diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());
// Check for infinite self-recursion in functions
if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
- D->getLocStart())) {
+ D->getBeginLoc())) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
checkRecursiveFunction(S, FD, Body, AC);
}
}
// Check for throw out of non-throwing function.
- if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getLocStart()))
+ if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
checkThrowInNonThrowingFunc(S, FD, AC);
@@ -2219,7 +2220,7 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// If none of the previous checks caused a CFG build, trigger one here
// for -Wtautological-overlap-compare
if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison,
- D->getLocStart())) {
+ D->getBeginLoc())) {
AC.getCFG();
}
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 3d21d79f2b85..5f20af01fb7b 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -5,6 +5,7 @@ set(LLVM_LINK_COMPONENTS
if (MSVC)
set_source_files_properties(SemaDeclAttr.cpp PROPERTIES COMPILE_FLAGS /bigobj)
set_source_files_properties(SemaExpr.cpp PROPERTIES COMPILE_FLAGS /bigobj)
+ set_source_files_properties(SemaExprCXX.cpp PROPERTIES COMPILE_FLAGS /bigobj)
set_source_files_properties(SemaTemplate.cpp PROPERTIES COMPILE_FLAGS /bigobj)
endif()
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 8af54b993932..92e65c4b819b 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -20,8 +20,8 @@
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Type.h"
#include "clang/Basic/IdentifierTable.h"
-#include "clang/Sema/Sema.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -29,6 +29,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -48,6 +49,8 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_Expression:
case CCC_ObjCMessageReceiver:
case CCC_ParenthesizedExpression:
+ case CCC_Symbol:
+ case CCC_SymbolOrNewName:
return true;
case CCC_TopLevel:
@@ -64,8 +67,7 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCProtocolName:
case CCC_Namespace:
case CCC_Type:
- case CCC_Name:
- case CCC_PotentiallyQualifiedName:
+ case CCC_NewName:
case CCC_MacroName:
case CCC_MacroNameUse:
case CCC_PreprocessorExpression:
@@ -79,6 +81,7 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCClassMessage:
case CCC_ObjCInterfaceName:
case CCC_ObjCCategoryName:
+ case CCC_IncludedFile:
return false;
}
@@ -126,10 +129,12 @@ StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
return "Namespace";
case CCKind::CCC_Type:
return "Type";
- case CCKind::CCC_Name:
- return "Name";
- case CCKind::CCC_PotentiallyQualifiedName:
- return "PotentiallyQualifiedName";
+ case CCKind::CCC_NewName:
+ return "NewName";
+ case CCKind::CCC_Symbol:
+ return "Symbol";
+ case CCKind::CCC_SymbolOrNewName:
+ return "SymbolOrNewName";
case CCKind::CCC_MacroName:
return "MacroName";
case CCKind::CCC_MacroNameUse:
@@ -154,6 +159,8 @@ StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
return "ObjCInterfaceName";
case CCKind::CCC_ObjCCategoryName:
return "ObjCCategoryName";
+ case CCKind::CCC_IncludedFile:
+ return "IncludedFile";
case CCKind::CCC_Recovery:
return "Recovery";
}
@@ -265,23 +272,18 @@ CodeCompletionString::Chunk::CreateResultType(const char *ResultType) {
return Chunk(CK_ResultType, ResultType);
}
-CodeCompletionString::Chunk
-CodeCompletionString::Chunk::CreateCurrentParameter(
- const char *CurrentParameter) {
+CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateCurrentParameter(
+ const char *CurrentParameter) {
return Chunk(CK_CurrentParameter, CurrentParameter);
}
-CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
- unsigned NumChunks,
- unsigned Priority,
- CXAvailabilityKind Availability,
- const char **Annotations,
- unsigned NumAnnotations,
- StringRef ParentName,
- const char *BriefComment)
- : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
- Priority(Priority), Availability(Availability),
- ParentName(ParentName), BriefComment(BriefComment) {
+CodeCompletionString::CodeCompletionString(
+ const Chunk *Chunks, unsigned NumChunks, unsigned Priority,
+ CXAvailabilityKind Availability, const char **Annotations,
+ unsigned NumAnnotations, StringRef ParentName, const char *BriefComment)
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations), Priority(Priority),
+ Availability(Availability), ParentName(ParentName),
+ BriefComment(BriefComment) {
assert(NumChunks <= 0xffff);
assert(NumAnnotations <= 0xffff);
@@ -289,7 +291,8 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
for (unsigned I = 0; I != NumChunks; ++I)
StoredChunks[I] = Chunks[I];
- const char **StoredAnnotations = reinterpret_cast<const char **>(StoredChunks + NumChunks);
+ const char **StoredAnnotations =
+ reinterpret_cast<const char **>(StoredChunks + NumChunks);
for (unsigned I = 0; I != NumAnnotations; ++I)
StoredAnnotations[I] = Annotations[I];
}
@@ -300,7 +303,7 @@ unsigned CodeCompletionString::getAnnotationCount() const {
const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const {
if (AnnotationNr < NumAnnotations)
- return reinterpret_cast<const char * const*>(end())[AnnotationNr];
+ return reinterpret_cast<const char *const *>(end())[AnnotationNr];
else
return nullptr;
}
@@ -309,27 +312,33 @@ std::string CodeCompletionString::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) {
- switch (C->Kind) {
- case CK_Optional: OS << "{#" << C->Optional->getAsString() << "#}"; break;
- case CK_Placeholder: OS << "<#" << C->Text << "#>"; break;
-
+ for (const Chunk &C : *this) {
+ switch (C.Kind) {
+ case CK_Optional:
+ OS << "{#" << C.Optional->getAsString() << "#}";
+ break;
+ case CK_Placeholder:
+ OS << "<#" << C.Text << "#>";
+ break;
case CK_Informative:
case CK_ResultType:
- OS << "[#" << C->Text << "#]";
+ OS << "[#" << C.Text << "#]";
+ break;
+ case CK_CurrentParameter:
+ OS << "<#" << C.Text << "#>";
+ break;
+ default:
+ OS << C.Text;
break;
-
- case CK_CurrentParameter: OS << "<#" << C->Text << "#>"; break;
- default: OS << C->Text; break;
}
}
return OS.str();
}
const char *CodeCompletionString::getTypedText() const {
- for (iterator C = begin(), CEnd = end(); C != CEnd; ++C)
- if (C->Kind == CK_TypedText)
- return C->Text;
+ for (const Chunk &C : *this)
+ if (C.Kind == CK_TypedText)
+ return C.Text;
return nullptr;
}
@@ -364,7 +373,7 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
// Find the interesting names.
SmallVector<const DeclContext *, 2> Contexts;
while (DC && !DC->isFunctionOrMethod()) {
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC)) {
+ if (const auto *ND = dyn_cast<NamedDecl>(DC)) {
if (ND->getIdentifier())
Contexts.push_back(DC);
}
@@ -383,11 +392,11 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
OS << "::";
}
- const DeclContext *CurDC = Contexts[I-1];
- if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
+ const DeclContext *CurDC = Contexts[I - 1];
+ if (const auto *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
CurDC = CatImpl->getCategoryDecl();
- if (const ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
+ if (const auto *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
const ObjCInterfaceDecl *Interface = Cat->getClassInterface();
if (!Interface) {
// Assign an empty StringRef but with non-null data to distinguish
@@ -413,11 +422,9 @@ CodeCompletionString *CodeCompletionBuilder::TakeString() {
sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size() +
sizeof(const char *) * Annotations.size(),
alignof(CodeCompletionString));
- CodeCompletionString *Result
- = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
- Priority, Availability,
- Annotations.data(), Annotations.size(),
- ParentName, BriefComment);
+ CodeCompletionString *Result = new (Mem) CodeCompletionString(
+ Chunks.data(), Chunks.size(), Priority, Availability, Annotations.data(),
+ Annotations.size(), ParentName, BriefComment);
Chunks.clear();
return Result;
}
@@ -446,8 +453,8 @@ void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) {
Chunks.push_back(Chunk::CreateResultType(ResultType));
}
-void
-CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) {
+void CodeCompletionBuilder::AddCurrentParameterChunk(
+ const char *CurrentParameter) {
Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
}
@@ -477,8 +484,7 @@ void CodeCompletionBuilder::addBriefComment(StringRef Comment) {
//===----------------------------------------------------------------------===//
// Code completion overload candidate implementation
//===----------------------------------------------------------------------===//
-FunctionDecl *
-CodeCompleteConsumer::OverloadCandidate::getFunction() const {
+FunctionDecl *CodeCompleteConsumer::OverloadCandidate::getFunction() const {
if (getKind() == CK_Function)
return Function;
else if (getKind() == CK_FunctionTemplate)
@@ -494,8 +500,9 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
return Function->getType()->getAs<FunctionType>();
case CK_FunctionTemplate:
- return FunctionTemplate->getTemplatedDecl()->getType()
- ->getAs<FunctionType>();
+ return FunctionTemplate->getTemplatedDecl()
+ ->getType()
+ ->getAs<FunctionType>();
case CK_FunctionType:
return Type;
@@ -510,46 +517,56 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
CodeCompleteConsumer::~CodeCompleteConsumer() = default;
-bool PrintingCodeCompleteConsumer::isResultFilteredOut(StringRef Filter,
- CodeCompletionResult Result) {
+bool PrintingCodeCompleteConsumer::isResultFilteredOut(
+ StringRef Filter, CodeCompletionResult Result) {
switch (Result.Kind) {
case CodeCompletionResult::RK_Declaration:
return !(Result.Declaration->getIdentifier() &&
- Result.Declaration->getIdentifier()->getName().startswith(Filter));
+ Result.Declaration->getIdentifier()->getName().startswith(Filter));
case CodeCompletionResult::RK_Keyword:
return !StringRef(Result.Keyword).startswith(Filter);
case CodeCompletionResult::RK_Macro:
return !Result.Macro->getName().startswith(Filter);
case CodeCompletionResult::RK_Pattern:
- return !StringRef(Result.Pattern->getAsString()).startswith(Filter);
+ return !(Result.Pattern->getTypedText() &&
+ StringRef(Result.Pattern->getTypedText()).startswith(Filter));
}
llvm_unreachable("Unknown code completion result Kind.");
}
-void
-PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
- CodeCompletionContext Context,
- CodeCompletionResult *Results,
- unsigned NumResults) {
+void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
+ Sema &SemaRef, CodeCompletionContext Context, CodeCompletionResult *Results,
+ unsigned NumResults) {
std::stable_sort(Results, Results + NumResults);
- StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
+ if (!Context.getPreferredType().isNull())
+ OS << "PREFERRED-TYPE: " << Context.getPreferredType().getAsString()
+ << "\n";
- // Print the results.
+ StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
+ // Print the completions.
for (unsigned I = 0; I != NumResults; ++I) {
- if(!Filter.empty() && isResultFilteredOut(Filter, Results[I]))
+ if (!Filter.empty() && isResultFilteredOut(Filter, Results[I]))
continue;
OS << "COMPLETION: ";
switch (Results[I].Kind) {
case CodeCompletionResult::RK_Declaration:
OS << *Results[I].Declaration;
- if (Results[I].Hidden)
- OS << " (Hidden)";
- if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Context,
- getAllocator(),
- CCTUInfo,
- includeBriefComments())) {
+ {
+ std::vector<std::string> Tags;
+ if (Results[I].Hidden)
+ Tags.push_back("Hidden");
+ if (Results[I].InBaseClass)
+ Tags.push_back("InBase");
+ if (Results[I].Availability ==
+ CXAvailabilityKind::CXAvailability_NotAccessible)
+ Tags.push_back("Inaccessible");
+ if (!Tags.empty())
+ OS << " (" << llvm::join(Tags, ",") << ")";
+ }
+ if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(
+ SemaRef, Context, getAllocator(), CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
if (const char *BriefComment = CCS->getBriefComment())
OS << " : " << BriefComment;
@@ -581,19 +598,16 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
case CodeCompletionResult::RK_Macro:
OS << Results[I].Macro->getName();
- if (CodeCompletionString *CCS
- = Results[I].CreateCodeCompletionString(SemaRef, Context,
- getAllocator(),
- CCTUInfo,
- includeBriefComments())) {
+ if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(
+ SemaRef, Context, getAllocator(), CCTUInfo,
+ includeBriefComments())) {
OS << " : " << CCS->getAsString();
}
OS << '\n';
break;
case CodeCompletionResult::RK_Pattern:
- OS << "Pattern : "
- << Results[I].Pattern->getAsString() << '\n';
+ OS << "Pattern : " << Results[I].Pattern->getAsString() << '\n';
break;
}
}
@@ -618,22 +632,29 @@ static std::string getOverloadAsString(const CodeCompletionString &CCS) {
OS << "<#" << C.Text << "#>";
break;
- default: OS << C.Text; break;
+ // FIXME: We can also print optional parameters of an overload.
+ case CodeCompletionString::CK_Optional:
+ break;
+
+ default:
+ OS << C.Text;
+ break;
}
}
return OS.str();
}
-void
-PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
- unsigned CurrentArg,
- OverloadCandidate *Candidates,
- unsigned NumCandidates) {
+void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
+ Sema &SemaRef, unsigned CurrentArg, OverloadCandidate *Candidates,
+ unsigned NumCandidates, SourceLocation OpenParLoc) {
+ OS << "OPENING_PAREN_LOC: ";
+ OpenParLoc.print(OS, SemaRef.getSourceManager());
+ OS << "\n";
+
for (unsigned I = 0; I != NumCandidates; ++I) {
- if (CodeCompletionString *CCS
- = Candidates[I].CreateSignatureString(CurrentArg, SemaRef,
- getAllocator(), CCTUInfo,
- includeBriefComments())) {
+ if (CodeCompletionString *CCS = Candidates[I].CreateSignatureString(
+ CurrentArg, SemaRef, getAllocator(), CCTUInfo,
+ includeBriefComments())) {
OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n";
}
}
@@ -673,7 +694,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
break;
}
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Declaration))
+ if (const auto *Function = dyn_cast<FunctionDecl>(Declaration))
if (Function->isDeleted())
Availability = CXAvailability_NotAvailable;
@@ -707,15 +728,15 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
/// saved into Saved and the returned StringRef will refer to it.
StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const {
switch (Kind) {
- case RK_Keyword:
- return Keyword;
- case RK_Pattern:
- return Pattern->getTypedText();
- case RK_Macro:
- return Macro->getName();
- case RK_Declaration:
- // Handle declarations below.
- break;
+ case RK_Keyword:
+ return Keyword;
+ case RK_Pattern:
+ return Pattern->getTypedText();
+ case RK_Macro:
+ return Macro->getName();
+ case RK_Declaration:
+ // Handle declarations below.
+ break;
}
DeclarationName Name = Declaration->getDeclName();
@@ -725,8 +746,7 @@ StringRef CodeCompletionResult::getOrderedName(std::string &Saved) const {
if (IdentifierInfo *Id = Name.getAsIdentifierInfo())
return Id->getName();
if (Name.isObjCZeroArgSelector())
- if (IdentifierInfo *Id
- = Name.getObjCSelector().getIdentifierInfoForSlot(0))
+ if (IdentifierInfo *Id = Name.getObjCSelector().getIdentifierInfoForSlot(0))
return Id->getName();
Saved = Name.getAsString();
@@ -743,9 +763,5 @@ bool clang::operator<(const CodeCompletionResult &X,
return cmp < 0;
// If case-insensitive comparison fails, try case-sensitive comparison.
- cmp = XStr.compare(YStr);
- if (cmp)
- return cmp < 0;
-
- return false;
+ return XStr.compare(YStr) < 0;
}
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index b22eea2b3642..8b002dac1343 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -156,14 +156,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
unsigned NumParams,
SourceLocation EllipsisLoc,
SourceLocation RParenLoc,
- unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
- SourceLocation ConstQualifierLoc,
- SourceLocation
- VolatileQualifierLoc,
- SourceLocation
- RestrictQualifierLoc,
SourceLocation MutableLoc,
ExceptionSpecificationType
ESpecType,
@@ -178,8 +172,9 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
SourceLocation LocalRangeBegin,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
- TypeResult TrailingReturnType) {
- assert(!(TypeQuals & DeclSpec::TQ_atomic) &&
+ TypeResult TrailingReturnType,
+ DeclSpec *MethodQualifiers) {
+ assert(!(MethodQualifiers && MethodQualifiers->getTypeQualifiers() & DeclSpec::TQ_atomic) &&
"function cannot have _Atomic qualifier");
DeclaratorChunk I;
@@ -193,14 +188,10 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
I.Fun.RParenLoc = RParenLoc.getRawEncoding();
I.Fun.DeleteParams = false;
- I.Fun.TypeQuals = TypeQuals;
I.Fun.NumParams = NumParams;
I.Fun.Params = nullptr;
I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
- I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding();
- I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding();
- I.Fun.RestrictQualifierLoc = RestrictQualifierLoc.getRawEncoding();
I.Fun.MutableLoc = MutableLoc.getRawEncoding();
I.Fun.ExceptionSpecType = ESpecType;
I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin().getRawEncoding();
@@ -211,8 +202,21 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() ||
TrailingReturnType.isInvalid();
I.Fun.TrailingReturnType = TrailingReturnType.get();
+ I.Fun.MethodQualifiers = nullptr;
+ I.Fun.QualAttrFactory = nullptr;
+
+ if (MethodQualifiers && (MethodQualifiers->getTypeQualifiers() ||
+ MethodQualifiers->getAttributes().size())) {
+ auto &attrs = MethodQualifiers->getAttributes();
+ I.Fun.MethodQualifiers = new DeclSpec(attrs.getPool().getFactory());
+ MethodQualifiers->forEachCVRUQualifier(
+ [&](DeclSpec::TQ TypeQual, StringRef PrintName, SourceLocation SL) {
+ I.Fun.MethodQualifiers->SetTypeQual(TypeQual, SL);
+ });
+ I.Fun.MethodQualifiers->getAttributes().takeAllFrom(attrs);
+ I.Fun.MethodQualifiers->getAttributePool().takeAllFrom(attrs.getPool());
+ }
- assert(I.Fun.TypeQuals == TypeQuals && "bitfield overflow");
assert(I.Fun.ExceptionSpecType == ESpecType && "bitfield overflow");
// new[] a parameter array if needed.
@@ -403,6 +407,24 @@ bool Declarator::isCtorOrDtor() {
(getName().getKind() == UnqualifiedIdKind::IK_DestructorName);
}
+void DeclSpec::forEachCVRUQualifier(
+ llvm::function_ref<void(TQ, StringRef, SourceLocation)> Handle) {
+ if (TypeQualifiers & TQ_const)
+ Handle(TQ_const, "const", TQ_constLoc);
+ if (TypeQualifiers & TQ_volatile)
+ Handle(TQ_volatile, "volatile", TQ_volatileLoc);
+ if (TypeQualifiers & TQ_restrict)
+ Handle(TQ_restrict, "restrict", TQ_restrictLoc);
+ if (TypeQualifiers & TQ_unaligned)
+ Handle(TQ_unaligned, "unaligned", TQ_unalignedLoc);
+}
+
+void DeclSpec::forEachQualifier(
+ llvm::function_ref<void(TQ, StringRef, SourceLocation)> Handle) {
+ forEachCVRUQualifier(Handle);
+ // FIXME: Add code below to iterate through the attributes and call Handle.
+}
+
bool DeclSpec::hasTagDefinition() const {
if (!TypeSpecOwned)
return false;
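
A hedged usage sketch of the new callback-based iteration; the collecting helper is hypothetical and not part of this patch:

  #include "clang/Sema/DeclSpec.h"
  #include "llvm/ADT/SmallVector.h"
  #include <utility>
  using namespace clang;

  static llvm::SmallVector<std::pair<StringRef, SourceLocation>, 4>
  writtenQualifiers(DeclSpec &DS) {
    llvm::SmallVector<std::pair<StringRef, SourceLocation>, 4> Quals;
    DS.forEachCVRUQualifier(
        [&](DeclSpec::TQ, StringRef Name, SourceLocation Loc) {
          Quals.emplace_back(Name, Loc);        // e.g. {"const", location of 'const'}
        });
    return Quals;
  }
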
@@ -438,7 +460,7 @@ template <class T> static bool BadSpecifier(T TNew, T TPrev,
if (TNew != TPrev)
DiagID = diag::err_invalid_decl_spec_combination;
else
- DiagID = IsExtension ? diag::ext_duplicate_declspec :
+ DiagID = IsExtension ? diag::ext_warn_duplicate_declspec :
diag::warn_duplicate_declspec;
return true;
}
@@ -566,14 +588,16 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// these storage-class specifiers.
// OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
// specifiers are not supported."
+ // OpenCL C++ v1.0 s2.9 restricts register.
if (S.getLangOpts().OpenCL &&
!S.getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers")) {
switch (SC) {
case SCS_extern:
case SCS_private_extern:
case SCS_static:
- if (S.getLangOpts().OpenCLVersion < 120) {
- DiagID = diag::err_opencl_unknown_type_specifier;
+ if (S.getLangOpts().OpenCLVersion < 120 &&
+ !S.getLangOpts().OpenCLCPlusPlus) {
+ DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = getSpecifierName(SC);
return true;
}
@@ -860,6 +884,11 @@ bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
IsExtension = false;
return BadSpecifier(T, T, PrevSpec, DiagID, IsExtension);
}
+
+ return SetTypeQual(T, Loc);
+}
+
+bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc) {
TypeQualifiers |= T;
switch (T) {
@@ -967,7 +996,7 @@ bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
unsigned &DiagID) {
if (isModulePrivateSpecified()) {
PrevSpec = "__module_private__";
- DiagID = diag::ext_duplicate_declspec;
+ DiagID = diag::ext_warn_duplicate_declspec;
return true;
}
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
index dba56931d49c..b439f7255728 100644
--- a/lib/Sema/IdentifierResolver.cpp
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -147,7 +147,7 @@ void IdentifierResolver::AddDecl(NamedDecl *D) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
updatingIdentifier(*II);
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
if (!Ptr) {
Name.setFETokenInfo(D);
@@ -172,7 +172,7 @@ void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
updatingIdentifier(*II);
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
if (!Ptr) {
AddDecl(D);
@@ -213,7 +213,7 @@ void IdentifierResolver::RemoveDecl(NamedDecl *D) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
updatingIdentifier(*II);
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
assert(Ptr && "Didn't find this decl on its identifier's chain!");
@@ -232,7 +232,7 @@ IdentifierResolver::begin(DeclarationName Name) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
readingIdentifier(*II);
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
if (!Ptr) return end();
if (isDeclPtr(Ptr))
@@ -304,7 +304,7 @@ bool IdentifierResolver::tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name){
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
readingIdentifier(*II);
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
if (!Ptr) {
Name.setFETokenInfo(D);
@@ -397,7 +397,7 @@ void IdentifierResolver::updatingIdentifier(IdentifierInfo &II) {
/// It creates a new IdDeclInfo if one was not created before for this id.
IdentifierResolver::IdDeclInfo &
IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) {
- void *Ptr = Name.getFETokenInfo<void>();
+ void *Ptr = Name.getFETokenInfo();
if (Ptr) return *toIdDeclInfo(Ptr);
@@ -415,7 +415,7 @@ IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) {
void IdentifierResolver::iterator::incrementSlowCase() {
NamedDecl *D = **this;
- void *InfoPtr = D->getDeclName().getFETokenInfo<void>();
+ void *InfoPtr = D->getDeclName().getFETokenInfo();
assert(!isDeclPtr(InfoPtr) && "Decl with wrong id ?");
IdDeclInfo *Info = toIdDeclInfo(InfoPtr);
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 58a7862370cc..a7495e8e0482 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -299,7 +299,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
auto *CS = cast<ObjCForCollectionStmt>(S);
unsigned Diag = diag::note_protected_by_objc_fast_enumeration;
unsigned NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope, Diag, 0, S->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, S->getBeginLoc()));
BuildScopeInformation(CS->getBody(), NewParentScope);
return;
}
@@ -353,16 +353,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// Cannot jump into the middle of the condition.
unsigned NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getBeginLoc()));
BuildScopeInformation(IS->getCond(), NewParentScope);
// Jumps into either arm of an 'if constexpr' are not allowed.
NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getBeginLoc()));
BuildScopeInformation(IS->getThen(), NewParentScope);
if (Stmt *Else = IS->getElse()) {
NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getBeginLoc()));
BuildScopeInformation(Else, NewParentScope);
}
return;
@@ -619,11 +619,11 @@ void JumpScopeChecker::VerifyJumps() {
continue;
SourceLocation Loc;
if (CaseStmt *CS = dyn_cast<CaseStmt>(SC))
- Loc = CS->getLocStart();
+ Loc = CS->getBeginLoc();
else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC))
- Loc = DS->getLocStart();
+ Loc = DS->getBeginLoc();
else
- Loc = SC->getLocStart();
+ Loc = SC->getBeginLoc();
CheckJump(SS, SC, Loc, diag::err_switch_into_protected_scope, 0,
diag::warn_cxx98_compat_switch_into_protected_scope);
}
@@ -863,7 +863,7 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
// less nested scope. Check if it crosses a __finally along the way.
for (unsigned I = FromScope; I > ToScope; I = Scopes[I].ParentScope) {
if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
- S.Diag(From->getLocStart(), diag::warn_jump_out_of_seh_finally);
+ S.Diag(From->getBeginLoc(), diag::warn_jump_out_of_seh_finally);
break;
}
}
diff --git a/lib/Sema/ParsedAttr.cpp b/lib/Sema/ParsedAttr.cpp
index 6509df9985ef..59e5aab677a9 100644
--- a/lib/Sema/ParsedAttr.cpp
+++ b/lib/Sema/ParsedAttr.cpp
@@ -41,8 +41,12 @@ size_t ParsedAttr::allocated_size() const {
else if (IsProperty)
return AttributeFactory::PropertyAllocSize;
else if (HasParsedType)
- return sizeof(ParsedAttr) + sizeof(void *);
- return (sizeof(ParsedAttr) + NumArgs * sizeof(ArgsUnion));
+ return totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
+ detail::TypeTagForDatatypeData, ParsedType,
+ detail::PropertyData>(0, 0, 0, 1, 0);
+ return totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
+ detail::TypeTagForDatatypeData, ParsedType,
+ detail::PropertyData>(NumArgs, 0, 0, 0, 0);
}
AttributeFactory::AttributeFactory() {
@@ -78,7 +82,7 @@ void AttributeFactory::deallocate(ParsedAttr *Attr) {
if (freeListIndex >= FreeLists.size())
FreeLists.resize(freeListIndex + 1);
-#if !NDEBUG
+#ifndef NDEBUG
// In debug mode, zero out the attribute to help find memory overwriting.
memset(Attr, 0, size);
#endif
@@ -99,15 +103,31 @@ void AttributePool::takePool(AttributePool &pool) {
#include "clang/Sema/AttrParsedAttrKinds.inc"
-static StringRef normalizeAttrName(StringRef AttrName, StringRef ScopeName,
+static StringRef normalizeAttrScopeName(StringRef ScopeName,
+ ParsedAttr::Syntax SyntaxUsed) {
+ // Normalize the "__gnu__" scope name to be "gnu" and the "_Clang" scope name
+ // to be "clang".
+ if (SyntaxUsed == ParsedAttr::AS_CXX11 ||
+ SyntaxUsed == ParsedAttr::AS_C2x) {
+ if (ScopeName == "__gnu__")
+ ScopeName = "gnu";
+ else if (ScopeName == "_Clang")
+ ScopeName = "clang";
+ }
+ return ScopeName;
+}
+
+static StringRef normalizeAttrName(StringRef AttrName,
+ StringRef NormalizedScopeName,
ParsedAttr::Syntax SyntaxUsed) {
// Normalize the attribute name, __foo__ becomes foo. This is only allowable
- // for GNU attributes.
- bool IsGNU = SyntaxUsed == ParsedAttr::AS_GNU ||
- ((SyntaxUsed == ParsedAttr::AS_CXX11 ||
- SyntaxUsed == ParsedAttr::AS_C2x) &&
- ScopeName == "gnu");
- if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
+ // for GNU attributes, and attributes using the double square bracket syntax.
+ bool ShouldNormalize =
+ SyntaxUsed == ParsedAttr::AS_GNU ||
+ ((SyntaxUsed == ParsedAttr::AS_CXX11 ||
+ SyntaxUsed == ParsedAttr::AS_C2x) &&
+ (NormalizedScopeName == "gnu" || NormalizedScopeName == "clang"));
+ if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
AttrName.endswith("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
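
Illustrative spellings (not from this patch's tests) that now normalize to the same attributes under the [[...]] syntax:

  [[gnu::const]] int f(int);
  [[__gnu__::__const__]] int g(int);               // "__gnu__" -> "gnu", "__const__" -> "const"
  [[clang::warn_unused_result]] int h(int);
  [[_Clang::__warn_unused_result__]] int k(int);   // "_Clang" -> "clang", "__warn_unused_result__" -> "warn_unused_result"
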
@@ -121,7 +141,7 @@ ParsedAttr::Kind ParsedAttr::getKind(const IdentifierInfo *Name,
SmallString<64> FullName;
if (ScopeName)
- FullName += ScopeName->getName();
+ FullName += normalizeAttrScopeName(ScopeName->getName(), SyntaxUsed);
AttrName = normalizeAttrName(AttrName, FullName, SyntaxUsed);
@@ -137,9 +157,10 @@ ParsedAttr::Kind ParsedAttr::getKind(const IdentifierInfo *Name,
unsigned ParsedAttr::getAttributeSpellingListIndex() const {
// Both variables will be used in tablegen generated
// attribute spell list index matching code.
- StringRef Scope = ScopeName ? ScopeName->getName() : "";
- StringRef Name = normalizeAttrName(AttrName->getName(), Scope,
- (ParsedAttr::Syntax)SyntaxUsed);
+ auto Syntax = static_cast<ParsedAttr::Syntax>(SyntaxUsed);
+ StringRef Scope =
+ ScopeName ? normalizeAttrScopeName(ScopeName->getName(), Syntax) : "";
+ StringRef Name = normalizeAttrName(AttrName->getName(), Scope, Syntax);
#include "clang/Sema/AttrSpellingListIndex.inc"
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index 62a83ccb70aa..bd8db6f4ed91 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -54,6 +54,8 @@ void FunctionScopeInfo::Clear() {
PossiblyUnreachableDiags.clear();
WeakObjectUses.clear();
ModifiedNonNullParams.clear();
+ Blocks.clear();
+ ByrefBlockVars.clear();
}
static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 49f3decc1151..9fa39968625a 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -152,7 +152,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
NSNumberLiteralMethods[I] = nullptr;
- if (getLangOpts().ObjC1)
+ if (getLangOpts().ObjC)
NSAPIObj.reset(new NSAPI(Context));
if (getLangOpts().CPlusPlus)
@@ -167,7 +167,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
PreallocatedFunctionScope.reset(new FunctionScopeInfo(Diags));
- // Initilization of data sharing attributes stack for OpenMP
+ // Initialization of data sharing attributes stack for OpenMP
InitDataSharingAttributesStack();
std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
@@ -214,7 +214,7 @@ void Sema::Initialize() {
// Initialize predefined Objective-C types:
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// If 'SEL' does not yet refer to any declarations, make it refer to the
// predefined 'SEL'.
DeclarationName SEL = &Context.Idents.get("SEL");
@@ -320,6 +320,10 @@ void Sema::Initialize() {
#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
setOpenCLExtensionForType(Context.Id, Ext);
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ addImplicitTypedef(#ExtType, Context.Id##Ty); \
+ setOpenCLExtensionForType(Context.Id##Ty, #Ext);
+#include "clang/Basic/OpenCLExtensionTypes.def"
};
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
@@ -441,7 +445,7 @@ void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
- E->getLocStart()))
+ E->getBeginLoc()))
return;
// nullptr only exists from C++11 on, so don't warn on its absence earlier.
if (!getLangOpts().CPlusPlus11)
@@ -454,13 +458,13 @@ void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
// If it is a macro from system header, and if the macro name is not "NULL",
// do not warn.
- SourceLocation MaybeMacroLoc = E->getLocStart();
+ SourceLocation MaybeMacroLoc = E->getBeginLoc();
if (Diags.getSuppressSystemWarnings() &&
SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
!findMacroSpelling(MaybeMacroLoc, "NULL"))
return;
- Diag(E->getLocStart(), diag::warn_zero_as_null_pointer_constant)
+ Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
<< FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}
@@ -488,7 +492,7 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
#endif
- diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getLocStart());
+ diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
diagnoseZeroToNullptrConversion(Kind, E);
QualType ExprTy = Context.getCanonicalType(E->getType());
@@ -533,6 +537,7 @@ CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
case Type::STK_Floating: return CK_FloatingToBoolean;
case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
+ case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
}
llvm_unreachable("unknown scalar type kind");
}
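An illustrative sketch of the new scalar kind, assuming the Embedded-C fixed-point extension (e.g. -ffixed-point) is enabled; the cast kind is the one added in the hunk above:

    void consume(_Accum Level) {
      if (Level)   // boolean test of a fixed-point value: CK_FixedPointToBoolean
        ;
    }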
@@ -644,7 +649,8 @@ void Sema::getUndefinedButUsed(
continue;
if (FD->isExternallyVisible() &&
!isExternalWithNoLinkageType(FD) &&
- !FD->getMostRecentDecl()->isInlined())
+ !FD->getMostRecentDecl()->isInlined() &&
+ !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
if (FD->getBuiltinID())
continue;
@@ -654,7 +660,8 @@ void Sema::getUndefinedButUsed(
continue;
if (VD->isExternallyVisible() &&
!isExternalWithNoLinkageType(VD) &&
- !VD->getMostRecentDecl()->isInline())
+ !VD->getMostRecentDecl()->isInline() &&
+ !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
// Skip VarDecls that lack formal definitions but which we know are in
@@ -827,7 +834,9 @@ void Sema::emitAndClearUnusedLocalTypedefWarnings() {
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
- if (getLangOpts().ModulesTS) {
+ if (getLangOpts().ModulesTS &&
+ (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
+ getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
SourceLocation StartOfTU =
SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
@@ -922,10 +931,9 @@ void Sema::ActOnEndOfTranslationUnit() {
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
- // FIXME: This is wrong for TUKind == TU_Prefix. In that case, we need to
- // write out the lists to the AST file (if any).
+ assert(DelayedOverridingExceptionSpecChecks.empty());
+ assert(DelayedEquivalentExceptionSpecChecks.empty());
assert(DelayedDefaultedMemberExceptionSpecs.empty());
- assert(DelayedExceptionSpecChecks.empty());
// All dllexport classes should have been processed already.
assert(DelayedDllExportClasses.empty());
@@ -978,7 +986,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// module declaration by now.
if (getLangOpts().getCompilingModule() ==
LangOptions::CMK_ModuleInterface &&
- ModuleScopes.back().Module->Kind != Module::ModuleInterfaceUnit) {
+ (ModuleScopes.empty() ||
+ ModuleScopes.back().Module->Kind != Module::ModuleInterfaceUnit)) {
// FIXME: Make a better guess as to where to put the module declaration.
Diag(getSourceManager().getLocForStartOfFile(
getSourceManager().getMainFileID()),
@@ -1399,9 +1408,69 @@ void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
"Remove assertion if intentionally called in a non-lambda context.");
}
+// Check that the type of the VarDecl has an accessible copy constructor and
+// resolve its destructor's exception specification.
+static void checkEscapingByref(VarDecl *VD, Sema &S) {
+ QualType T = VD->getType();
+ EnterExpressionEvaluationContext scope(
+ S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ SourceLocation Loc = VD->getLocation();
+ Expr *VarRef =
+ new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
+ ExprResult Result = S.PerformMoveOrCopyInitialization(
+ InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
+ VarRef, /*AllowNRVO=*/true);
+ if (!Result.isInvalid()) {
+ Result = S.MaybeCreateExprWithCleanups(Result);
+ Expr *Init = Result.getAs<Expr>();
+ S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
+ }
+
+  // The destructor's exception specification is needed when IRGen generates
+ // block copy/destroy functions. Resolve it here.
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ if (CXXDestructorDecl *DD = RD->getDestructor()) {
+ auto *FPT = DD->getType()->getAs<FunctionProtoType>();
+ S.ResolveExceptionSpec(Loc, FPT);
+ }
+}
+
+static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
+ // Set the EscapingByref flag of __block variables captured by
+ // escaping blocks.
+ for (const BlockDecl *BD : FSI.Blocks) {
+ if (BD->doesNotEscape())
+ continue;
+ for (const BlockDecl::Capture &BC : BD->captures()) {
+ VarDecl *VD = BC.getVariable();
+ if (VD->hasAttr<BlocksAttr>())
+ VD->setEscapingByref();
+ }
+ }
+
+ for (VarDecl *VD : FSI.ByrefBlockVars) {
+ // __block variables might require us to capture a copy-initializer.
+ if (!VD->isEscapingByref())
+ continue;
+ // It's currently invalid to ever have a __block variable with an
+ // array type; should we diagnose that here?
+ // Regardless, we don't want to ignore array nesting when
+ // constructing this copy.
+ if (VD->getType()->isStructureOrClassType())
+ checkEscapingByref(VD, S);
+ }
+}
+
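A sketch of the situation the two helpers above handle (illustrative only; assumes -fblocks, and Logger is a placeholder class type):

    struct Logger { Logger(); Logger(const Logger &); ~Logger(); void log(); };
    void (^StoredCallback)(void);        // global: blocks stored here escape
    void registerCallback() {
      __block Logger L;                  // tracked in FunctionScopeInfo::ByrefBlockVars
      StoredCallback = ^{ L.log(); };    // escaping block capturing a __block variable:
                                         // L is marked EscapingByref, its copy-initializer
                                         // is built, and ~Logger's exception spec resolved
    }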
void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
const Decl *D, const BlockExpr *blkExpr) {
assert(!FunctionScopes.empty() && "mismatched push/pop!");
+
+ // This function shouldn't be called after popping the current function scope.
+ // markEscapingByrefs calls PerformMoveOrCopyInitialization, which can call
+  // PushFunctionScope, which can clear out PreallocatedFunctionScope
+ // when FunctionScopes is empty.
+ markEscapingByrefs(*FunctionScopes.back(), *this);
+
FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
if (LangOpts.OpenMP)
@@ -1851,6 +1920,34 @@ void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) {
setOpenCLExtensionForDecl(D, CurrOpenCLExtension);
}
+std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) {
+ if (!OpenCLDeclExtMap.empty())
+ return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap);
+
+ return "";
+}
+
+std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
+ if (!OpenCLTypeExtMap.empty())
+ return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap);
+
+ return "";
+}
+
+template <typename T, typename MapT>
+std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
+ std::string ExtensionNames = "";
+ auto Loc = Map.find(FDT);
+
+ for (auto const& I : Loc->second) {
+ ExtensionNames += I;
+ ExtensionNames += " ";
+ }
+ ExtensionNames.pop_back();
+
+ return ExtensionNames;
+}
+
bool Sema::isOpenCLDisabledDecl(Decl *FD) {
auto Loc = OpenCLDeclExtMap.find(FD);
if (Loc == OpenCLDeclExtMap.end())
@@ -1889,6 +1986,14 @@ bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) {
if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr()))
Decl = TagT->getDecl();
auto Loc = DS.getTypeSpecTypeLoc();
+
+ // Check extensions for vector types.
+ // e.g. double4 is not allowed when cl_khr_fp64 is absent.
+ if (QT->isExtVectorType()) {
+ auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr();
+ return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap);
+ }
+
if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap))
return true;
@@ -1899,6 +2004,6 @@ bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) {
bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) {
IdentifierInfo *FnName = D.getIdentifier();
- return checkOpenCLDisabledTypeOrDecl(&D, E.getLocStart(), FnName,
+ return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName,
OpenCLDeclExtMap, 1, D.getSourceRange());
}
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
index e06792cae78b..69084589efea 100644
--- a/lib/Sema/SemaAccess.cpp
+++ b/lib/Sema/SemaAccess.cpp
@@ -1728,6 +1728,22 @@ Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, Entity);
}
+/// Checks implicit access to a member in a structured binding.
+Sema::AccessResult
+Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
+ CXXRecordDecl *DecomposedClass,
+ DeclAccessPair Field) {
+ if (!getLangOpts().AccessControl ||
+ Field.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, DecomposedClass, Field,
+ Context.getRecordType(DecomposedClass));
+ Entity.setDiag(diag::err_decomp_decl_inaccessible_field);
+
+ return CheckAccess(*this, UseLoc, Entity);
+}
+
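A small example of what the new entry point diagnoses (illustrative; S is a placeholder type, and the error wording is paraphrased):

    struct S {
      int a;
    private:
      int b;              // not accessible outside S
    };
    void f(S s) {
      auto [x, y] = s;    // error: 'b' is inaccessible when decomposing 'S'
    }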
/// Checks access to an overloaded member operator, including
/// conversion operators.
Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
@@ -1861,22 +1877,31 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
/// specifiers into account, but no member access expressions and such.
///
/// \param Target the declaration to check if it can be accessed
-/// \param Ctx the class/context from which to start the search
+/// \param NamingClass the class in which the lookup was started.
+/// \param BaseType type of the left side of member access expression.
+/// \p BaseType and \p NamingClass are used for C++ access control.
+/// Depending on the lookup case, they should be set to the following:
+/// - lhs.target (member access without a qualifier):
+/// \p BaseType and \p NamingClass are both the type of 'lhs'.
+/// - lhs.X::target (member access with a qualifier):
+/// BaseType is the type of 'lhs', NamingClass is 'X'
+/// - X::target (qualified lookup without member access):
+/// BaseType is null, NamingClass is 'X'.
+/// - target (unqualified lookup).
+/// BaseType is null, NamingClass is the parent class of 'target'.
/// \return true if the Target is accessible from the Class, false otherwise.
-bool Sema::IsSimplyAccessible(NamedDecl *Target, DeclContext *Ctx) {
- if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
- if (!Target->isCXXClassMember())
- return true;
-
- if (Target->getAccess() == AS_public)
- return true;
- QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+bool Sema::IsSimplyAccessible(NamedDecl *Target, CXXRecordDecl *NamingClass,
+ QualType BaseType) {
+ // Perform the C++ accessibility checks first.
+ if (Target->isCXXClassMember() && NamingClass) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
// The unprivileged access is AS_none as we don't know how the member was
// accessed, which is described by the access in DeclAccessPair.
// `IsAccessible` will examine the actual access of Target (i.e.
// Decl->getAccess()) when calculating the access.
- AccessTarget Entity(Context, AccessedEntity::Member, Class,
- DeclAccessPair::make(Target, AS_none), qType);
+ AccessTarget Entity(Context, AccessedEntity::Member, NamingClass,
+ DeclAccessPair::make(Target, AS_none), BaseType);
EffectiveContext EC(CurContext);
return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
}
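The four lookup shapes listed in the comment above, spelled out as a sketch (X, Y and target are placeholder names):

    struct X { protected: int target; };
    struct Y : X {
      void m(Y &lhs) {
        (void)lhs.target;     // BaseType and NamingClass are both 'Y'
        (void)lhs.X::target;  // BaseType is 'Y', NamingClass is 'X'
        (void)X::target;      // BaseType is null, NamingClass is 'X'
        (void)target;         // BaseType is null, NamingClass is the parent class of 'target'
      }
    };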
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 1a8a00075306..2bc1b769f77a 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -405,7 +405,7 @@ void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
Diag(PragmaLocation, diag::warn_pragma_pop_failed) << PragmaName
<< "stack empty";
if (SegmentName &&
- !checkSectionName(SegmentName->getLocStart(), SegmentName->getString()))
+ !checkSectionName(SegmentName->getBeginLoc(), SegmentName->getString()))
return;
Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
}
@@ -520,9 +520,9 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
} // end anonymous namespace
-void Sema::ActOnPragmaAttributePush(ParsedAttr &Attribute,
- SourceLocation PragmaLoc,
- attr::ParsedSubjectMatchRuleSet Rules) {
+void Sema::ActOnPragmaAttributeAttribute(
+ ParsedAttr &Attribute, SourceLocation PragmaLoc,
+ attr::ParsedSubjectMatchRuleSet Rules) {
SmallVector<attr::SubjectMatchRule, 4> SubjectMatchRules;
// Gather the subject match rules that are supported by the attribute.
SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4>
@@ -622,54 +622,88 @@ void Sema::ActOnPragmaAttributePush(ParsedAttr &Attribute,
Diagnostic << attrMatcherRuleListToString(ExtraRules);
}
- PragmaAttributeStack.push_back(
+ if (PragmaAttributeStack.empty()) {
+ Diag(PragmaLoc, diag::err_pragma_attr_attr_no_push);
+ return;
+ }
+
+ PragmaAttributeStack.back().Entries.push_back(
{PragmaLoc, &Attribute, std::move(SubjectMatchRules), /*IsUsed=*/false});
}
-void Sema::ActOnPragmaAttributePop(SourceLocation PragmaLoc) {
+void Sema::ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
+ const IdentifierInfo *Namespace) {
+ PragmaAttributeStack.emplace_back();
+ PragmaAttributeStack.back().Loc = PragmaLoc;
+ PragmaAttributeStack.back().Namespace = Namespace;
+}
+
+void Sema::ActOnPragmaAttributePop(SourceLocation PragmaLoc,
+ const IdentifierInfo *Namespace) {
if (PragmaAttributeStack.empty()) {
- Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch);
+ Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch) << 1;
return;
}
- const PragmaAttributeEntry &Entry = PragmaAttributeStack.back();
- if (!Entry.IsUsed) {
- assert(Entry.Attribute && "Expected an attribute");
- Diag(Entry.Attribute->getLoc(), diag::warn_pragma_attribute_unused)
- << Entry.Attribute->getName();
- Diag(PragmaLoc, diag::note_pragma_attribute_region_ends_here);
+
+ // Dig back through the stack trying to find the most recently pushed group
+ // that in Namespace. Note that this works fine if no namespace is present,
+ // think of push/pops without namespaces as having an implicit "nullptr"
+ // namespace.
+ for (size_t Index = PragmaAttributeStack.size(); Index;) {
+ --Index;
+ if (PragmaAttributeStack[Index].Namespace == Namespace) {
+ for (const PragmaAttributeEntry &Entry :
+ PragmaAttributeStack[Index].Entries) {
+ if (!Entry.IsUsed) {
+ assert(Entry.Attribute && "Expected an attribute");
+ Diag(Entry.Attribute->getLoc(), diag::warn_pragma_attribute_unused)
+ << *Entry.Attribute;
+ Diag(PragmaLoc, diag::note_pragma_attribute_region_ends_here);
+ }
+ }
+ PragmaAttributeStack.erase(PragmaAttributeStack.begin() + Index);
+ return;
+ }
}
- PragmaAttributeStack.pop_back();
+
+ if (Namespace)
+ Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch)
+ << 0 << Namespace->getName();
+ else
+ Diag(PragmaLoc, diag::err_pragma_attribute_stack_mismatch) << 1;
}
void Sema::AddPragmaAttributes(Scope *S, Decl *D) {
if (PragmaAttributeStack.empty())
return;
- for (auto &Entry : PragmaAttributeStack) {
- ParsedAttr *Attribute = Entry.Attribute;
- assert(Attribute && "Expected an attribute");
-
- // Ensure that the attribute can be applied to the given declaration.
- bool Applies = false;
- for (const auto &Rule : Entry.MatchRules) {
- if (Attribute->appliesToDecl(D, Rule)) {
- Applies = true;
- break;
+ for (auto &Group : PragmaAttributeStack) {
+ for (auto &Entry : Group.Entries) {
+ ParsedAttr *Attribute = Entry.Attribute;
+ assert(Attribute && "Expected an attribute");
+
+ // Ensure that the attribute can be applied to the given declaration.
+ bool Applies = false;
+ for (const auto &Rule : Entry.MatchRules) {
+ if (Attribute->appliesToDecl(D, Rule)) {
+ Applies = true;
+ break;
+ }
}
+ if (!Applies)
+ continue;
+ Entry.IsUsed = true;
+ PragmaAttributeCurrentTargetDecl = D;
+ ParsedAttributesView Attrs;
+ Attrs.addAtEnd(Attribute);
+ ProcessDeclAttributeList(S, D, Attrs);
+ PragmaAttributeCurrentTargetDecl = nullptr;
}
- if (!Applies)
- continue;
- Entry.IsUsed = true;
- PragmaAttributeCurrentTargetDecl = D;
- ParsedAttributesView Attrs;
- Attrs.addAtStart(Attribute);
- ProcessDeclAttributeList(S, D, Attrs);
- PragmaAttributeCurrentTargetDecl = nullptr;
}
}
void Sema::PrintPragmaAttributeInstantiationPoint() {
assert(PragmaAttributeCurrentTargetDecl && "Expected an active declaration");
- Diags.Report(PragmaAttributeCurrentTargetDecl->getLocStart(),
+ Diags.Report(PragmaAttributeCurrentTargetDecl->getBeginLoc(),
diag::note_pragma_attribute_applied_decl_here);
}
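For context, a sketch of the push/pop pairing the reworked stack expects; the namespaced spelling (MYLIB is a placeholder) is assumed to take the <namespace>.push / <namespace>.pop form introduced by this change:

    #pragma clang attribute push (__attribute__((annotate("outer"))), apply_to = function)
    #pragma clang attribute MYLIB.push (__attribute__((annotate("inner"))), apply_to = function)
    void f();                           // receives both annotations
    #pragma clang attribute MYLIB.pop   // pops only the most recent MYLIB group
    void g();                           // receives only "outer"
    #pragma clang attribute pop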
@@ -773,6 +807,18 @@ void Sema::ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC) {
}
}
+void Sema::ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC) {
+ switch (FPC) {
+ case LangOptions::FEA_On:
+ FPFeatures.setAllowFEnvAccess();
+ break;
+ case LangOptions::FEA_Off:
+ FPFeatures.setDisallowFEnvAccess();
+ break;
+ }
+}
+
+
void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc) {
// Visibility calculations will consider the namespace's visibility.
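A minimal sketch of the pragma that reaches the new handler (assuming the standard #pragma STDC FENV_ACCESS spelling feeds FEnvAccessModeKind):

    #include <cfenv>
    #pragma STDC FENV_ACCESS ON         // ActOnPragmaFEnvAccess(LangOptions::FEA_On)
    double scaled(double x) {
      std::feclearexcept(FE_ALL_EXCEPT);
      return x * 0.1;                   // FP environment may now be observed/modified
    }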
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index 13dd8d936fd2..ffc728898584 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -48,7 +48,7 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context)
- DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ DeclRefExpr(Context, ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
MarkFunctionReferenced(LLLLoc, ConfigDecl);
return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr,
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 4e39d0675471..2354ffe7fbcc 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -209,11 +209,13 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
if (!tag || tag->isDependentContext())
return false;
+ // Grab the tag definition, if there is one.
+ QualType type = Context.getTypeDeclType(tag);
+ tag = type->getAsTagDecl();
+
// If we're currently defining this type, then lookup into the
// type is okay: don't complain that it isn't complete yet.
- QualType type = Context.getTypeDeclType(tag);
- const TagType *tagType = type->getAs<TagType>();
- if (tagType && tagType->isBeingDefined())
+ if (tag->isBeingDefined())
return false;
SourceLocation loc = SS.getLastQualifierNameLoc();
@@ -229,13 +231,13 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
// Fixed enum types are complete, but they aren't valid as scopes
// until we see a definition, so awkwardly pull out this special
// case.
- const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType);
- if (!enumType)
+ auto *EnumD = dyn_cast<EnumDecl>(tag);
+ if (!EnumD)
return false;
- if (enumType->getDecl()->isCompleteDefinition()) {
+ if (EnumD->isCompleteDefinition()) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
- if (!hasVisibleDefinition(enumType->getDecl(), &SuggestedDef,
+ if (!hasVisibleDefinition(EnumD, &SuggestedDef,
/*OnlyNeedComplete*/false)) {
// If the user is going to see an error here, recover by making the
// definition visible.
@@ -249,11 +251,11 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
// Try to instantiate the definition, if this is a specialization of an
// enumeration temploid.
- EnumDecl *ED = enumType->getDecl();
- if (EnumDecl *Pattern = ED->getInstantiatedFromMemberEnum()) {
- MemberSpecializationInfo *MSI = ED->getMemberSpecializationInfo();
+ if (EnumDecl *Pattern = EnumD->getInstantiatedFromMemberEnum()) {
+ MemberSpecializationInfo *MSI = EnumD->getMemberSpecializationInfo();
if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
- if (InstantiateEnum(loc, ED, Pattern, getTemplateInstantiationArgs(ED),
+ if (InstantiateEnum(loc, EnumD, Pattern,
+ getTemplateInstantiationArgs(EnumD),
TSK_ImplicitInstantiation)) {
SS.SetInvalid(SS.getRange());
return true;
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index 57aac80f5ad2..0b4645e11c34 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -131,6 +131,9 @@ namespace {
return PlaceholderKind == K;
}
+ // Language specific cast restrictions for address spaces.
+ void checkAddressSpaceCast(QualType SrcType, QualType DestType);
+
void checkCastAlign() {
Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
}
@@ -561,7 +564,7 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
Qualifiers *CastAwayQualifiers = nullptr) {
// If the only checking we care about is for Objective-C lifetime qualifiers,
// and we're not in ObjC mode, there's nothing to check.
- if (!CheckCVR && CheckObjCLifetime && !Self.Context.getLangOpts().ObjC1)
+ if (!CheckCVR && CheckObjCLifetime && !Self.Context.getLangOpts().ObjC)
return CastAwayConstnessKind::CACK_None;
if (!DestType->isReferenceType()) {
@@ -1044,6 +1047,17 @@ void CastOperation::CheckStaticCast() {
}
}
+static bool IsAddressSpaceConversion(QualType SrcType, QualType DestType) {
+ auto *SrcPtrType = SrcType->getAs<PointerType>();
+ if (!SrcPtrType)
+ return false;
+ auto *DestPtrType = DestType->getAs<PointerType>();
+ if (!DestPtrType)
+ return false;
+ return SrcPtrType->getPointeeType().getAddressSpace() !=
+ DestPtrType->getPointeeType().getAddressSpace();
+}
+
/// TryStaticCast - Check if a static cast can be performed, and do so if
/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
/// and casting away constness.
@@ -1185,7 +1199,9 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
return TC_Failed;
}
}
- Kind = CK_BitCast;
+ Kind = IsAddressSpaceConversion(SrcType, DestType)
+ ? CK_AddressSpaceConversion
+ : CK_BitCast;
return TC_Success;
}
@@ -1264,7 +1280,7 @@ TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
}
Sema::ReferenceCompareResult RefResult = Self.CompareReferenceRelationship(
- SrcExpr->getLocStart(), ToType, FromType, DerivedToBase, ObjCConversion,
+ SrcExpr->getBeginLoc(), ToType, FromType, DerivedToBase, ObjCConversion,
ObjCLifetimeConversion);
if (RefResult != Sema::Ref_Compatible) {
if (CStyle || RefResult == Sema::Ref_Incompatible)
@@ -1281,7 +1297,7 @@ TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
Kind = CK_DerivedToBase;
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
- if (!Self.IsDerivedFrom(SrcExpr->getLocStart(), SrcExpr->getType(),
+ if (!Self.IsDerivedFrom(SrcExpr->getBeginLoc(), SrcExpr->getType(),
R->getPointeeType(), Paths))
return TC_NotApplicable;
@@ -1964,12 +1980,6 @@ static bool fixOverloadedReinterpretCastExpr(Sema &Self, QualType DestType,
return Result.isUsable();
}
-static bool IsAddressSpaceConversion(QualType SrcType, QualType DestType) {
- return SrcType->isPointerType() && DestType->isPointerType() &&
- SrcType->getAs<PointerType>()->getPointeeType().getAddressSpace() !=
- DestType->getAs<PointerType>()->getPointeeType().getAddressSpace();
-}
-
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
SourceRange OpRange,
@@ -2269,6 +2279,27 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return SuccessResult;
}
+void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
+ // In OpenCL only conversions between pointers to objects in overlapping
+ // addr spaces are allowed. v2.0 s6.5.5 - Generic addr space overlaps
+ // with any named one, except for constant.
+ if (Self.getLangOpts().OpenCL) {
+ auto SrcPtrType = SrcType->getAs<PointerType>();
+ if (!SrcPtrType)
+ return;
+ auto DestPtrType = DestType->getAs<PointerType>();
+ if (!DestPtrType)
+ return;
+ if (!DestPtrType->isAddressSpaceOverlapping(*SrcPtrType)) {
+ Self.Diag(OpRange.getBegin(),
+ diag::err_typecheck_incompatible_address_space)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ }
+ }
+}
+
void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
bool ListInitialization) {
assert(Self.getLangOpts().CPlusPlus);
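An OpenCL sketch of what checkAddressSpaceCast rejects, now for C-style and C++-style casts alike (kernel code, illustrative only):

    kernel void k(global int *g) {
      local int   *l = (local int *)g;    // error: 'global' and 'local' do not overlap
      generic int *p = (generic int *)g;  // OK: generic overlaps every named address
                                          // space except constant
    }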
@@ -2396,6 +2427,8 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
}
+ checkAddressSpaceCast(SrcExpr.get()->getType(), DestType);
+
if (isValidCast(tcr)) {
if (Kind == CK_BitCast)
checkCastAlign();
@@ -2482,20 +2515,9 @@ void CastOperation::CheckCStyleCast() {
assert(!SrcType->isPlaceholderType());
- // OpenCL v1 s6.5: Casting a pointer to address space A to a pointer to
- // address space B is illegal.
- if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
- SrcType->isPointerType()) {
- const PointerType *DestPtr = DestType->getAs<PointerType>();
- if (!DestPtr->isAddressSpaceOverlapping(*SrcType->getAs<PointerType>())) {
- Self.Diag(OpRange.getBegin(),
- diag::err_typecheck_incompatible_address_space)
- << SrcType << DestType << Sema::AA_Casting
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
- }
+ checkAddressSpaceCast(SrcType, DestType);
+ if (SrcExpr.isInvalid())
+ return;
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
diag::err_typecheck_cast_to_incomplete)) {
@@ -2532,10 +2554,11 @@ void CastOperation::CheckCStyleCast() {
// OpenCL v2.0 s6.13.10 - Allow casts from '0' to event_t type.
if (Self.getLangOpts().OpenCL && DestType->isEventT()) {
- llvm::APSInt CastInt;
- if (SrcExpr.get()->EvaluateAsInt(CastInt, Self.Context)) {
+ Expr::EvalResult Result;
+ if (SrcExpr.get()->EvaluateAsInt(Result, Self.Context)) {
+ llvm::APSInt CastInt = Result.Val.getInt();
if (0 == CastInt) {
- Kind = CK_ZeroToOCLEvent;
+ Kind = CK_ZeroToOCLOpaqueType;
return;
}
Self.Diag(OpRange.getBegin(),
@@ -2612,9 +2635,9 @@ void CastOperation::CheckCStyleCast() {
} else if (!SrcType->isArithmeticType()) {
if (!DestType->isIntegralType(Self.Context) &&
DestType->isArithmeticType()) {
- Self.Diag(SrcExpr.get()->getLocStart(),
- diag::err_cast_pointer_to_non_pointer_int)
- << DestType << SrcExpr.get()->getSourceRange();
+ Self.Diag(SrcExpr.get()->getBeginLoc(),
+ diag::err_cast_pointer_to_non_pointer_int)
+ << DestType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
@@ -2623,8 +2646,8 @@ void CastOperation::CheckCStyleCast() {
if (Self.getLangOpts().OpenCL &&
!Self.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
if (DestType->isHalfType()) {
- Self.Diag(SrcExpr.get()->getLocStart(), diag::err_opencl_cast_to_half)
- << DestType << SrcExpr.get()->getSourceRange();
+ Self.Diag(SrcExpr.get()->getBeginLoc(), diag::err_opencl_cast_to_half)
+ << DestType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
@@ -2644,18 +2667,18 @@ void CastOperation::CheckCStyleCast() {
if (CastPtr->getPointeeType()->isObjCLifetimeType() &&
ExprPtr->getPointeeType()->isObjCLifetimeType() &&
!CastQuals.compatiblyIncludesObjCLifetime(ExprQuals)) {
- Self.Diag(SrcExpr.get()->getLocStart(),
+ Self.Diag(SrcExpr.get()->getBeginLoc(),
diag::err_typecheck_incompatible_ownership)
- << SrcType << DestType << Sema::AA_Casting
- << SrcExpr.get()->getSourceRange();
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
return;
}
}
}
else if (!Self.CheckObjCARCUnavailableWeakConversion(DestType, SrcType)) {
- Self.Diag(SrcExpr.get()->getLocStart(),
+ Self.Diag(SrcExpr.get()->getBeginLoc(),
diag::err_arc_convesion_of_weak_unavailable)
- << 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ << 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
@@ -2703,10 +2726,10 @@ static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
}
// This is a variant of int **x; const int **y = (const int **)x;
if (qualifiers == -1)
- Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual2)
+ Self.Diag(SrcExpr.get()->getBeginLoc(), diag::warn_cast_qual2)
<< SrcType << DestType;
else
- Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual)
+ Self.Diag(SrcExpr.get()->getBeginLoc(), diag::warn_cast_qual)
<< TheOffendingSrcType << TheOffendingDestType << qualifiers;
}
@@ -2716,7 +2739,7 @@ ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
Expr *CastExpr) {
CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
- Op.OpRange = SourceRange(LPLoc, CastExpr->getLocEnd());
+ Op.OpRange = SourceRange(LPLoc, CastExpr->getEndLoc());
if (getLangOpts().CPlusPlus) {
Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false,
@@ -2744,7 +2767,7 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
CastOperation Op(*this, Type, CastExpr);
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
- Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getLocEnd());
+ Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getEndLoc());
Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 93dbeab5b034..8dc1fdb76988 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -27,6 +27,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
@@ -35,7 +36,6 @@
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
-#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
@@ -112,13 +112,13 @@ static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
if (argCount == desiredArgCount) return false;
if (argCount < desiredArgCount)
- return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 /*function call*/ << desiredArgCount << argCount
- << call->getSourceRange();
+ return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << desiredArgCount << argCount
+ << call->getSourceRange();
// Highlight all the excess arguments.
- SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
- call->getArg(argCount - 1)->getLocEnd());
+ SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
+ call->getArg(argCount - 1)->getEndLoc());
return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/ << desiredArgCount << argCount
@@ -135,8 +135,8 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
Expr *ValArg = TheCall->getArg(0);
QualType Ty = ValArg->getType();
if (!Ty->isIntegerType()) {
- S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
- << ValArg->getSourceRange();
+ S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
+ << ValArg->getSourceRange();
return true;
}
@@ -144,8 +144,8 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
if (!Literal || !Literal->isAscii()) {
- S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
- << StrArg->getSourceRange();
+ S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
+ << StrArg->getSourceRange();
return true;
}
@@ -156,7 +156,7 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
// We need at least one argument.
if (TheCall->getNumArgs() < 1) {
- S.Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
<< 0 << 1 << TheCall->getNumArgs()
<< TheCall->getCallee()->getSourceRange();
return true;
@@ -166,7 +166,7 @@ static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
for (Expr *Arg : TheCall->arguments()) {
auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
if (!Literal || !Literal->isWide()) {
- S.Diag(Arg->getLocStart(), diag::err_msvc_annotation_wide_str)
+ S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
<< Arg->getSourceRange();
return true;
}
@@ -182,7 +182,7 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return true;
ExprResult Arg(TheCall->getArg(0));
- QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart());
+ QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
if (ResultType.isNull())
return true;
@@ -200,7 +200,7 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
ExprResult Arg = TheCall->getArg(I);
QualType Ty = Arg.get()->getType();
if (!Ty->isIntegerType()) {
- S.Diag(Arg.get()->getLocStart(), diag::err_overflow_builtin_must_be_int)
+ S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
<< Ty << Arg.get()->getSourceRange();
return true;
}
@@ -221,7 +221,7 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
const auto *PtrTy = Ty->getAs<PointerType>();
if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
!PtrTy->getPointeeType().isConstQualified())) {
- S.Diag(Arg.get()->getLocStart(),
+ S.Diag(Arg.get()->getBeginLoc(),
diag::err_overflow_builtin_must_be_ptr_int)
<< Ty << Arg.get()->getSourceRange();
return true;
@@ -238,7 +238,8 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
CallExpr *TheCall, unsigned SizeIdx,
- unsigned DstSizeIdx) {
+ unsigned DstSizeIdx,
+ StringRef LikelyMacroName) {
if (TheCall->getNumArgs() <= SizeIdx ||
TheCall->getNumArgs() <= DstSizeIdx)
return;
@@ -246,29 +247,41 @@ static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
const Expr *SizeArg = TheCall->getArg(SizeIdx);
const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);
- llvm::APSInt Size, DstSize;
+ Expr::EvalResult SizeResult, DstSizeResult;
// find out if both sizes are known at compile time
- if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
- !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
+ if (!SizeArg->EvaluateAsInt(SizeResult, S.Context) ||
+ !DstSizeArg->EvaluateAsInt(DstSizeResult, S.Context))
return;
+ llvm::APSInt Size = SizeResult.Val.getInt();
+ llvm::APSInt DstSize = DstSizeResult.Val.getInt();
+
if (Size.ule(DstSize))
return;
- // confirmed overflow so generate the diagnostic.
- IdentifierInfo *FnName = FDecl->getIdentifier();
- SourceLocation SL = TheCall->getLocStart();
- SourceRange SR = TheCall->getSourceRange();
+ // Confirmed overflow, so generate the diagnostic.
+ StringRef FunctionName = FDecl->getName();
+ SourceLocation SL = TheCall->getBeginLoc();
+ SourceManager &SM = S.getSourceManager();
+ // If we're in an expansion of a macro whose name corresponds to this builtin,
+ // use the simple macro name and location.
+ if (SL.isMacroID() && Lexer::getImmediateMacroName(SL, SM, S.getLangOpts()) ==
+ LikelyMacroName) {
+ FunctionName = LikelyMacroName;
+ SL = SM.getImmediateMacroCallerLoc(SL);
+ }
- S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
+ S.Diag(SL, diag::warn_memcpy_chk_overflow)
+ << FunctionName << DstSize.toString(/*Radix=*/10)
+ << Size.toString(/*Radix=*/10);
}
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
if (checkArgCount(S, BuiltinCall, 2))
return true;
- SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
+ SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
Expr *Call = BuiltinCall->getArg(0);
Expr *Chain = BuiltinCall->getArg(1);
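A sketch of the improved diagnostic, assuming a fortified memcpy macro that expands to __builtin___memcpy_chk (as with _FORTIFY_SOURCE); both size arguments must be compile-time evaluable for the check to fire:

    #include <string.h>                 // assumes a fortifying memcpy macro
    void copy_header(const void *src) {
      char buf[16];
      memcpy(buf, src, 32);             // warning now names 'memcpy' rather than the
                                        // builtin, and reports sizes 16 vs. 32
    }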
@@ -375,9 +388,9 @@ static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
SourceLocation ErrorLoc;
if (isa<BlockExpr>(BlockArg)) {
BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
- ErrorLoc = BD->getParamDecl(ArgCounter)->getLocStart();
+ ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
} else if (isa<DeclRefExpr>(BlockArg)) {
- ErrorLoc = cast<DeclRefExpr>(BlockArg)->getLocStart();
+ ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
}
S.Diag(ErrorLoc,
diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
@@ -390,8 +403,8 @@ static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
- S.Diag(Call->getLocStart(), diag::err_opencl_requires_extension)
- << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
+ << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
return true;
}
return false;
@@ -407,16 +420,14 @@ static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
// First argument is an ndrange_t type.
Expr *NDRangeArg = TheCall->getArg(0);
if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
- S.Diag(NDRangeArg->getLocStart(),
- diag::err_opencl_builtin_expected_type)
+ S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'ndrange_t'";
return true;
}
Expr *BlockArg = TheCall->getArg(1);
if (!isBlockPointer(BlockArg)) {
- S.Diag(BlockArg->getLocStart(),
- diag::err_opencl_builtin_expected_type)
+ S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
@@ -432,8 +443,7 @@ static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
Expr *BlockArg = TheCall->getArg(0);
if (!isBlockPointer(BlockArg)) {
- S.Diag(BlockArg->getLocStart(),
- diag::err_opencl_builtin_expected_type)
+ S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
@@ -467,7 +477,7 @@ static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
// For each argument passed to the block, a corresponding uint needs to
// be passed to describe the size of the local memory.
if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
- S.Diag(TheCall->getLocStart(),
+ S.Diag(TheCall->getBeginLoc(),
diag::err_opencl_enqueue_kernel_local_size_args);
return true;
}
@@ -507,7 +517,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
if (NumArgs < 4) {
- S.Diag(TheCall->getLocStart(), diag::err_typecheck_call_too_few_args);
+ S.Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_few_args);
return true;
}
@@ -518,7 +528,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// First argument always needs to be a queue_t type.
if (!Arg0->getType()->isQueueT()) {
- S.Diag(TheCall->getArg(0)->getLocStart(),
+ S.Diag(TheCall->getArg(0)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << S.Context.OCLQueueTy;
return true;
@@ -526,7 +536,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// Second argument always needs to be a kernel_enqueue_flags_t enum value.
if (!Arg1->getType()->isIntegerType()) {
- S.Diag(TheCall->getArg(1)->getLocStart(),
+ S.Diag(TheCall->getArg(1)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
return true;
@@ -534,7 +544,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// Third argument is always an ndrange_t type.
if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
- S.Diag(TheCall->getArg(2)->getLocStart(),
+ S.Diag(TheCall->getArg(2)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "'ndrange_t'";
return true;
@@ -545,7 +555,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
if (NumArgs == 4) {
// check that the last argument is the right block type.
if (!isBlockPointer(Arg3)) {
- S.Diag(Arg3->getLocStart(), diag::err_opencl_builtin_expected_type)
+ S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
@@ -553,7 +563,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
const BlockPointerType *BPT =
cast<BlockPointerType>(Arg3->getType().getCanonicalType());
if (BPT->getPointeeType()->getAs<FunctionProtoType>()->getNumParams() > 0) {
- S.Diag(Arg3->getLocStart(),
+ S.Diag(Arg3->getBeginLoc(),
diag::err_opencl_enqueue_kernel_blocks_no_args);
return true;
}
@@ -568,7 +578,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// check common block argument.
Expr *Arg6 = TheCall->getArg(6);
if (!isBlockPointer(Arg6)) {
- S.Diag(Arg6->getLocStart(), diag::err_opencl_builtin_expected_type)
+ S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "block";
return true;
}
@@ -577,7 +587,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// Fourth argument has to be any integer type.
if (!Arg3->getType()->isIntegerType()) {
- S.Diag(TheCall->getArg(3)->getLocStart(),
+ S.Diag(TheCall->getArg(3)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee() << "integer";
return true;
@@ -590,7 +600,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
if (!Arg4->isNullPointerConstant(S.Context,
Expr::NPC_ValueDependentIsNotNull) &&
!Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
- S.Diag(TheCall->getArg(4)->getLocStart(),
+ S.Diag(TheCall->getArg(4)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
@@ -602,7 +612,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
Expr::NPC_ValueDependentIsNotNull) &&
!(Arg5->getType()->isPointerType() &&
Arg5->getType()->getPointeeType()->isClkEventT())) {
- S.Diag(TheCall->getArg(5)->getLocStart(),
+ S.Diag(TheCall->getArg(5)->getBeginLoc(),
diag::err_opencl_builtin_expected_type)
<< TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
@@ -616,7 +626,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
}
// None of the specific cases has been detected; give a generic error.
- S.Diag(TheCall->getLocStart(),
+ S.Diag(TheCall->getBeginLoc(),
diag::err_opencl_enqueue_kernel_incorrect_args);
return true;
}
@@ -631,7 +641,7 @@ static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
const Expr *Arg0 = Call->getArg(0);
// First argument type should always be pipe.
if (!Arg0->getType()->isPipeType()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
<< Call->getDirectCallee() << Arg0->getSourceRange();
return true;
}
@@ -650,7 +660,7 @@ static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIsub_group_commit_read_pipe:
if (!(!AccessQual || AccessQual->isReadOnly())) {
- S.Diag(Arg0->getLocStart(),
+ S.Diag(Arg0->getBeginLoc(),
diag::err_opencl_builtin_pipe_invalid_access_modifier)
<< "read_only" << Arg0->getSourceRange();
return true;
@@ -664,7 +674,7 @@ static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
case Builtin::BIwork_group_commit_write_pipe:
case Builtin::BIsub_group_commit_write_pipe:
if (!(AccessQual && AccessQual->isWriteOnly())) {
- S.Diag(Arg0->getLocStart(),
+ S.Diag(Arg0->getBeginLoc(),
diag::err_opencl_builtin_pipe_invalid_access_modifier)
<< "write_only" << Arg0->getSourceRange();
return true;
@@ -688,7 +698,7 @@ static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
if (!ArgTy ||
!S.Context.hasSameType(
EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.getPointerType(EltTy)
<< ArgIdx->getType() << ArgIdx->getSourceRange();
return true;
@@ -721,7 +731,7 @@ static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
// read/write_pipe(pipe T, reserve_id_t, uint, T*).
// Check reserve_id_t.
if (!Call->getArg(1)->getType()->isReserveIDT()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.OCLReserveIDTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
@@ -731,7 +741,7 @@ static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
const Expr *Arg2 = Call->getArg(2);
if (!Arg2->getType()->isIntegerType() &&
!Arg2->getType()->isUnsignedIntegerType()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.UnsignedIntTy
<< Arg2->getType() << Arg2->getSourceRange();
return true;
@@ -742,7 +752,7 @@ static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
return true;
} break;
default:
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_arg_num)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
<< Call->getDirectCallee() << Call->getSourceRange();
return true;
}
@@ -765,7 +775,7 @@ static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
// Check the reserve size.
if (!Call->getArg(1)->getType()->isIntegerType() &&
!Call->getArg(1)->getType()->isUnsignedIntegerType()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.UnsignedIntTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
@@ -793,7 +803,7 @@ static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
// Check reserve_id_t.
if (!Call->getArg(1)->getType()->isReserveIDT()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
<< Call->getDirectCallee() << S.Context.OCLReserveIDTy
<< Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
return true;
@@ -812,7 +822,7 @@ static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
return true;
if (!Call->getArg(0)->getType()->isPipeType()) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_pipe_first_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
<< Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
return true;
}
@@ -829,7 +839,7 @@ static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
CallExpr *Call) {
if (Call->getNumArgs() != 1) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_arg_num)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
<< Call->getDirectCallee() << Call->getSourceRange();
return true;
}
@@ -837,11 +847,18 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
auto RT = Call->getArg(0)->getType();
if (!RT->isPointerType() || RT->getPointeeType()
.getAddressSpace() == LangAS::opencl_constant) {
- S.Diag(Call->getLocStart(), diag::err_opencl_builtin_to_addr_invalid_arg)
+ S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
<< Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
return true;
}
+ if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
+ S.Diag(Call->getArg(0)->getBeginLoc(),
+ diag::warn_opencl_generic_address_space_arg)
+ << Call->getDirectCallee()->getNameInfo().getAsString()
+ << Call->getArg(0)->getSourceRange();
+ }
+
RT = RT->getPointeeType();
auto Qual = RT.getQualifiers();
switch (BuiltinID) {
@@ -863,6 +880,66 @@ static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
return false;
}
+static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return ExprError();
+
+ // Compute __builtin_launder's parameter type from the argument.
+ // The parameter type is:
+ // * The type of the argument if it's not an array or function type,
+ // Otherwise,
+ // * The decayed argument type.
+ QualType ParamTy = [&]() {
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
+ return S.Context.getPointerType(Ty->getElementType());
+ if (ArgTy->isFunctionType()) {
+ return S.Context.getPointerType(ArgTy);
+ }
+ return ArgTy;
+ }();
+
+ TheCall->setType(ParamTy);
+
+ auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
+ if (!ParamTy->isPointerType())
+ return 0;
+ if (ParamTy->isFunctionPointerType())
+ return 1;
+ if (ParamTy->isVoidPointerType())
+ return 2;
+ return llvm::Optional<unsigned>{};
+ }();
+ if (DiagSelect.hasValue()) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
+ << DiagSelect.getValue() << TheCall->getSourceRange();
+ return ExprError();
+ }
+
+ // We either have an incomplete class type, or we have a class template
+ // whose instantiation has not been forced. Example:
+ //
+ // template <class T> struct Foo { T value; };
+ // Foo<int> *p = nullptr;
+ // auto *d = __builtin_launder(p);
+ if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
+ diag::err_incomplete_type))
+ return ExprError();
+
+ assert(ParamTy->getPointeeType()->isObjectType() &&
+ "Unhandled non-object pointer case");
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
+ ExprResult Arg =
+ S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
+ if (Arg.isInvalid())
+ return ExprError();
+ TheCall->setArg(0, Arg.get());
+
+ return TheCall;
+}
+
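A usage sketch for the checking above (illustrative; Widget is a placeholder type):

    #include <new>
    struct Widget { int v; };
    int reuse(Widget *p) {
      p->~Widget();
      ::new (p) Widget{42};
      return __builtin_launder(p)->v;    // OK: pointer to a complete object type
      // __builtin_launder(42);          // rejected: not a pointer (select 0)
      // __builtin_launder((void *)p);   // rejected: pointer to void (select 2)
    }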
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
@@ -872,7 +949,7 @@ CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
S.getASTContext().getTargetInfo().getTriple().getArch();
if (llvm::is_contained(SupportedArchs, CurArch))
return false;
- S.Diag(TheCall->getLocStart(), diag::err_builtin_target_unsupported)
+ S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< TheCall->getSourceRange();
return true;
}
@@ -915,6 +992,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
case Builtin::BI__va_start: {
switch (Context.getTargetInfo().getTriple().getArch()) {
+ case llvm::Triple::aarch64:
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (SemaBuiltinVAStartARMMicrosoft(TheCall))
@@ -1024,6 +1102,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (checkArgCount(*this, TheCall, 1)) return true;
TheCall->setType(Context.IntTy);
break;
+ case Builtin::BI__builtin_launder:
+ return SemaBuiltinLaunder(*this, TheCall);
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
@@ -1127,6 +1207,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return SemaBuiltinAtomicOverloaded(TheCallResult);
+ case Builtin::BI__sync_synchronize:
+ Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
+ << TheCall->getCallee()->getSourceRange();
+ break;
case Builtin::BI__builtin_nontemporal_load:
case Builtin::BI__builtin_nontemporal_store:
return SemaBuiltinNontemporalOverloaded(TheCallResult);
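The new warning fires on any use of the legacy full barrier (sketch; diagnostic text paraphrased):

    void publish(int *flag) {
      *flag = 1;
      __sync_synchronize();   // warning: implicit use of a sequentially-consistent fence
    }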
@@ -1171,7 +1255,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
const QualType PtrArgType = PtrArg->getType();
if (!PtrArgType->isPointerType() ||
!PtrArgType->getPointeeType()->isRecordType()) {
- Diag(PtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
<< "structure pointer";
return ExprError();
@@ -1181,9 +1265,9 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
const QualType FnPtrArgType = FnPtrArg->getType();
if (!FnPtrArgType->isPointerType()) {
- Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
+ Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
+ << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
@@ -1191,15 +1275,15 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
FnPtrArgType->getPointeeType()->getAs<FunctionType>();
if (!FuncType) {
- Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
+ Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
+ << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
}
if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
if (!FT->getNumParams()) {
- Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
<< 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
@@ -1208,7 +1292,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
!PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
!PT->getPointeeType().isConstQualified()) {
- Diag(FnPtrArg->getLocStart(), diag::err_typecheck_convert_incompatible)
+ Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
<< 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
return ExprError();
@@ -1222,21 +1306,37 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// check secure string manipulation functions where overflows
// are detectable at compile time
case Builtin::BI__builtin___memcpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memcpy");
+ break;
case Builtin::BI__builtin___memmove_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memmove");
+ break;
case Builtin::BI__builtin___memset_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "memset");
+ break;
case Builtin::BI__builtin___strlcat_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcat");
+ break;
case Builtin::BI__builtin___strlcpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strlcpy");
+ break;
case Builtin::BI__builtin___strncat_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncat");
+ break;
case Builtin::BI__builtin___strncpy_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "strncpy");
+ break;
case Builtin::BI__builtin___stpncpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3, "stpncpy");
break;
case Builtin::BI__builtin___memccpy_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4, "memccpy");
break;
case Builtin::BI__builtin___snprintf_chk:
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "snprintf");
+ break;
case Builtin::BI__builtin___vsnprintf_chk:
- SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
+ SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3, "vsnprintf");
break;
case Builtin::BI__builtin_call_with_static_chain:
if (SemaBuiltinCallWithStaticChain(*this, TheCall))
@@ -1259,7 +1359,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
if (CheckCXXThrowOperand(
- TheCall->getLocStart(),
+ TheCall->getBeginLoc(),
Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
TheCall))
return ExprError();
@@ -1273,7 +1373,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// check for the argument.
if (SemaBuiltinRWPipe(*this, TheCall))
return ExprError();
- TheCall->setType(Context.IntTy);
break;
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
@@ -1305,7 +1404,6 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BIget_pipe_max_packets:
if (SemaBuiltinPipePackets(*this, TheCall))
return ExprError();
- TheCall->setType(Context.UnsignedIntTy);
break;
case Builtin::BIto_global:
case Builtin::BIto_local:
@@ -1477,8 +1575,8 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
TV = Result.getLimitedValue(64);
if ((TV > 63) || (mask & (1ULL << TV)) == 0)
- return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
- << TheCall->getArg(ImmArg)->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ImmArg)->getSourceRange();
}
if (PtrArgNum >= 0) {
@@ -1503,7 +1601,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
if (RHS.isInvalid())
return true;
- if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
+ if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
RHS.get(), AA_Assigning))
return true;
}
@@ -1557,8 +1655,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
- << PointerArg->getType() << PointerArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
@@ -1574,10 +1672,9 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
CastKind CastNeeded = CK_NoOp;
if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
CastNeeded = CK_BitCast;
- Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers)
- << PointerArg->getType()
- << Context.getPointerType(AddrType)
- << AA_Passing << PointerArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
+ << PointerArg->getType() << Context.getPointerType(AddrType)
+ << AA_Passing << PointerArg->getSourceRange();
}
// Finally, do the cast and replace the argument with the corrected version.
@@ -1592,16 +1689,16 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
// In general, we allow ints, floats and pointers to be loaded and stored.
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType() && !ValType->isFloatingType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
- << PointerArg->getType() << PointerArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
+ << PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
// But ARM doesn't have instructions to deal with 128-bit versions.
if (Context.getTypeSize(ValType) > MaxWidth) {
assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
- Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size)
- << PointerArg->getType() << PointerArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
+ << PointerArg->getType() << PointerArg->getSourceRange();
return true;
}
@@ -1614,8 +1711,8 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
- Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
- << ValType << PointerArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
+ << ValType << PointerArg->getSourceRange();
return true;
}
@@ -1715,6 +1812,16 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
BuiltinID == AArch64::BI__builtin_arm_wsrp)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
+ // Only check the valid encoding range. Any constant in this range would be
+ // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
+ // an exception for incorrect registers. This matches MSVC behavior.
+ if (BuiltinID == AArch64::BI_ReadStatusReg ||
+ BuiltinID == AArch64::BI_WriteStatusReg)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
+
+ if (BuiltinID == AArch64::BI__getReg)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
+
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
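The 0..0x7fff range accepted for _ReadStatusReg/_WriteStatusReg corresponds to a 15-bit packed system-register operand, which the comment above writes in the S1_2_C3_C4_5 form, i.e. S<op0>_<op1>_C<CRn>_C<CRm>_<op2>. A rough sketch of that packing, for illustration only; this helper is not part of the patch and the exact field widths are an assumption:

    // Illustration only (assumed field layout): pack the MSR/MRS operand
    // fields into the 15-bit immediate range checked above.
    constexpr unsigned SysRegEncoding(unsigned Op0, unsigned Op1, unsigned CRn,
                                      unsigned CRm, unsigned Op2) {
      return ((Op0 & 1u) << 14) | ((Op1 & 7u) << 11) | ((CRn & 15u) << 7) |
             ((CRm & 15u) << 3) | (Op2 & 7u); // always fits in 0..0x7fff
    }
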
@@ -1732,783 +1839,820 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
}
bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
- static const std::map<unsigned, std::vector<StringRef>> ValidCPU = {
- { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, {"v62", "v65"} },
+ struct BuiltinAndString {
+ unsigned BuiltinID;
+ const char *Str;
};
- static const std::map<unsigned, std::vector<StringRef>> ValidHVX = {
- { Hexagon::BI__builtin_HEXAGON_V6_extractw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_hi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lo, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vand, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, {"v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vror, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, {"v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh, {"v60", "v62", "v65"} },
- { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, {"v60", "v62", "v65"} },
+ static BuiltinAndString ValidCPU[] = {
+ { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" },
+ };
+
+ static BuiltinAndString ValidHVX[] = {
+ { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" },
+ { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" },
+ };
+
+ // Sort the tables on first execution so we can binary search them.
+ auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ };
+ static const bool SortOnce =
+ (llvm::sort(ValidCPU, SortCmp),
+ llvm::sort(ValidHVX, SortCmp), true);
+ (void)SortOnce;
+ auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) {
+ return BI.BuiltinID < BuiltinID;
};
const TargetInfo &TI = Context.getTargetInfo();
- auto FC = ValidCPU.find(BuiltinID);
- if (FC != ValidCPU.end()) {
+ const BuiltinAndString *FC =
+ std::lower_bound(std::begin(ValidCPU), std::end(ValidCPU), BuiltinID,
+ LowerBoundCmp);
+ if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) {
const TargetOptions &Opts = TI.getTargetOpts();
StringRef CPU = Opts.CPU;
if (!CPU.empty()) {
assert(CPU.startswith("hexagon") && "Unexpected CPU name");
CPU.consume_front("hexagon");
- if (llvm::none_of(FC->second, [CPU](StringRef S) { return S == CPU; }))
- return Diag(TheCall->getLocStart(),
+ SmallVector<StringRef, 3> CPUs;
+ StringRef(FC->Str).split(CPUs, ',');
+ if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; }))
+ return Diag(TheCall->getBeginLoc(),
diag::err_hexagon_builtin_unsupported_cpu);
}
}
- auto FH = ValidHVX.find(BuiltinID);
- if (FH != ValidHVX.end()) {
+ const BuiltinAndString *FH =
+ std::lower_bound(std::begin(ValidHVX), std::end(ValidHVX), BuiltinID,
+ LowerBoundCmp);
+ if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) {
if (!TI.hasFeature("hvx"))
- return Diag(TheCall->getLocStart(),
+ return Diag(TheCall->getBeginLoc(),
diag::err_hexagon_builtin_requires_hvx);
- bool IsValid = llvm::any_of(FH->second,
+ SmallVector<StringRef, 3> HVXs;
+ StringRef(FH->Str).split(HVXs, ',');
+ bool IsValid = llvm::any_of(HVXs,
[&TI] (StringRef V) {
std::string F = "hvx" + V.str();
return TI.hasFeature(F);
});
if (!IsValid)
- return Diag(TheCall->getLocStart(),
+ return Diag(TheCall->getBeginLoc(),
diag::err_hexagon_builtin_unsupported_hvx);
}
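
The hunk above replaces the old per-builtin associative tables with flat static arrays of { BuiltinID, "cpu,list" } pairs that are sorted once, via a dynamically initialized function-local static, and then probed with std::lower_bound. A minimal, self-contained sketch of that idiom follows; the struct name mirrors the patch, but the table contents, lookup() and main() are illustrative placeholders, not Clang code.

    // Sort-once-then-binary-search idiom, in isolation.
    #include <algorithm>
    #include <cstdio>

    struct BuiltinAndString {
      unsigned BuiltinID;
      const char *Str;          // comma-separated list of CPU revisions
    };

    static BuiltinAndString Table[] = {
        {42, "v62,v65,v66"},
        {7,  "v60,v62,v65,v66"},
        {19, "v66"},
    };

    static const char *lookup(unsigned BuiltinID) {
      // The comma operator runs the sort exactly once, on first call;
      // C++11 guarantees thread-safe initialization of the local static.
      static const bool SortOnce =
          (std::sort(std::begin(Table), std::end(Table),
                     [](const BuiltinAndString &L, const BuiltinAndString &R) {
                       return L.BuiltinID < R.BuiltinID;
                     }),
           true);
      (void)SortOnce;

      const BuiltinAndString *I = std::lower_bound(
          std::begin(Table), std::end(Table), BuiltinID,
          [](const BuiltinAndString &BI, unsigned ID) { return BI.BuiltinID < ID; });
      return (I != std::end(Table) && I->BuiltinID == BuiltinID) ? I->Str : nullptr;
    }

    int main() { std::printf("%s\n", lookup(19)); }  // prints "v66"

Relative to the removed entries, which paired each builtin with a brace-initialized list of version strings, a single comma-separated string per entry keeps the tables as flat arrays; the code above splits it on demand with StringRef::split.
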
@@ -2517,15 +2661,17 @@ bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
struct ArgInfo {
- ArgInfo(unsigned O, bool S, unsigned W, unsigned A)
- : OpNum(O), IsSigned(S), BitWidth(W), Align(A) {}
- unsigned OpNum = 0;
- bool IsSigned = false;
- unsigned BitWidth = 0;
- unsigned Align = 0;
+ uint8_t OpNum;
+ bool IsSigned;
+ uint8_t BitWidth;
+ uint8_t Align;
+ };
+ struct BuiltinInfo {
+ unsigned BuiltinID;
+ ArgInfo Infos[2];
};
- static const std::map<unsigned, std::vector<ArgInfo>> Infos = {
+ static BuiltinInfo Infos[] = {
{ Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} },
{ Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} },
{ Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} },
@@ -2711,15 +2857,33 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
{{ 3, false, 1, 0 }} },
};
- auto F = Infos.find(BuiltinID);
- if (F == Infos.end())
+ // Use a dynamically initialized static to sort the table exactly once on
+ // first run.
+ static const bool SortOnce =
+ (llvm::sort(Infos,
+ [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
+ return LHS.BuiltinID < RHS.BuiltinID;
+ }),
+ true);
+ (void)SortOnce;
+
+ const BuiltinInfo *F =
+ std::lower_bound(std::begin(Infos), std::end(Infos), BuiltinID,
+ [](const BuiltinInfo &BI, unsigned BuiltinID) {
+ return BI.BuiltinID < BuiltinID;
+ });
+ if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
return false;
bool Error = false;
- for (const ArgInfo &A : F->second) {
- int32_t Min = A.IsSigned ? -(1 << (A.BitWidth-1)) : 0;
- int32_t Max = (1 << (A.IsSigned ? A.BitWidth-1 : A.BitWidth)) - 1;
+ for (const ArgInfo &A : F->Infos) {
+ // Ignore empty ArgInfo elements.
+ if (A.BitWidth == 0)
+ continue;
+
+ int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
+ int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
if (!A.Align) {
Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
} else {
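
Min and Max above describe the legal range of a constant builtin operand with the given bit width and signedness: a signed N-bit immediate spans -2^(N-1) .. 2^(N-1)-1 and an unsigned one spans 0 .. 2^N-1. A standalone worked check of that arithmetic (not Clang code):

    // Immediate-range computation from the hunk above, checked in isolation.
    #include <cassert>
    #include <cstdint>

    static void range(bool IsSigned, unsigned BitWidth, int32_t &Min, int32_t &Max) {
      Min = IsSigned ? -(1 << (BitWidth - 1)) : 0;
      Max = (1 << (IsSigned ? BitWidth - 1 : BitWidth)) - 1;
    }

    int main() {
      int32_t Min, Max;
      range(/*IsSigned=*/true, /*BitWidth=*/4, Min, Max);
      assert(Min == -8 && Max == 7);  // s4 operand, e.g. the { 3, true, 4, 3 } entry
      range(/*IsSigned=*/false, /*BitWidth=*/1, Min, Max);
      assert(Min == 0 && Max == 1);   // u1 operand, e.g. {{ 3, false, 1, 0 }}
      return 0;
    }
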
@@ -2760,7 +2924,7 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
- // MSA instrinsics. Instructions (which the intrinsics maps to) which use the
+ // MSA intrinsics. Instructions (which the intrinsics map to) that use the
// df/m field.
// These intrinsics take an unsigned 3 bit immediate.
case Mips::BI__builtin_msa_bclri_b:
@@ -2903,14 +3067,14 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_ldi_h:
case Mips::BI__builtin_msa_ldi_w:
case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
- case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break;
- case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break;
- case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break;
- case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break;
- case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break;
- case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break;
- case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break;
- case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break;
+ case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
+ case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
+ case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
+ case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
+ case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
}
if (!m)
@@ -2935,15 +3099,22 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
BuiltinID == PPC::BI__builtin_divdeu;
if (Is64BitBltin && !IsTarget64Bit)
- return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt)
- << TheCall->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
+ << TheCall->getSourceRange();
if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) ||
(BuiltinID == PPC::BI__builtin_bpermd &&
!Context.getTargetInfo().hasFeature("bpermd")))
- return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7)
+ return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
<< TheCall->getSourceRange();
+ auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
+ if (!Context.getTargetInfo().hasFeature("vsx"))
+ return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
+ << TheCall->getSourceRange();
+ return false;
+ };
+
switch (BuiltinID) {
default: return false;
case PPC::BI__builtin_altivec_crypto_vshasigmaw:
@@ -2962,6 +3133,11 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case PPC::BI__builtin_vsx_xxpermdi:
case PPC::BI__builtin_vsx_xxsldwi:
return SemaBuiltinVSX(TheCall);
+ case PPC::BI__builtin_unpack_vector_int128:
+ return SemaVSXCheck(TheCall) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_pack_vector_int128:
+ return SemaVSXCheck(TheCall);
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
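// Aside: the lambda above lets several PPC builtins share one "requires VSX"
// gate before their per-argument checks. A simplified standalone model of that
// control flow (hypothetical names; not the Sema API), keeping Sema's
// convention that returning true means an error was diagnosed:
#include <set>
#include <string>

static bool checkPackUnpack(const std::set<std::string> &TargetFeatures,
                            bool IsUnpack, long ElemIndex) {
  auto RequireVSX = [&]() -> bool {
    // Would emit err_ppc_builtin_only_on_pwr7 in the real code.
    return TargetFeatures.count("vsx") == 0;
  };
  if (RequireVSX())
    return true;
  // __builtin_unpack_vector_int128 also needs a constant selector of 0 or 1.
  if (IsUnpack && (ElemIndex < 0 || ElemIndex > 1))
    return true; // would emit err_argument_invalid_range
  return false;
}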
@@ -2973,7 +3149,7 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
llvm::APSInt AbortCode(32);
if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
- return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code)
+ return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
<< Arg->getSourceRange();
}
@@ -3037,14 +3213,14 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
// Check if the argument is a string literal.
if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
<< Arg->getSourceRange();
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
- return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports)
+ return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
<< Arg->getSourceRange();
return false;
}
@@ -3057,14 +3233,14 @@ static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
// Check if the argument is a string literal.
if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return S.Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
<< Arg->getSourceRange();
// Check the contents of the string.
StringRef Feature =
cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
if (!S.Context.getTargetInfo().validateCpuIs(Feature))
- return S.Diag(TheCall->getLocStart(), diag::err_invalid_cpu_is)
+ return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
<< Arg->getSourceRange();
return false;
}
@@ -3267,8 +3443,8 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
(HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
return false;
- return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_rounding)
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
+ << Arg->getSourceRange();
}
// Check if the gather/scatter scale is legal.
@@ -3370,8 +3546,8 @@ bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
return false;
- return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_scale)
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
+ << Arg->getSourceRange();
}
static bool isX86_32Builtin(unsigned BuiltinID) {
@@ -3395,7 +3571,7 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
// Check for 32-bit only builtins on a 64-bit target.
const llvm::Triple &TT = Context.getTargetInfo().getTriple();
if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
- return Diag(TheCall->getCallee()->getLocStart(),
+ return Diag(TheCall->getCallee()->getBeginLoc(),
diag::err_32_bit_builtin_64_bit_tgt);
// If the intrinsic has rounding or SAE make sure its valid.
@@ -3630,6 +3806,14 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift:
+ case X86::BI__builtin_ia32_kshiftliqi:
+ case X86::BI__builtin_ia32_kshiftlihi:
+ case X86::BI__builtin_ia32_kshiftlisi:
+ case X86::BI__builtin_ia32_kshiftlidi:
+ case X86::BI__builtin_ia32_kshiftriqi:
+ case X86::BI__builtin_ia32_kshiftrihi:
+ case X86::BI__builtin_ia32_kshiftrisi:
+ case X86::BI__builtin_ia32_kshiftridi:
i = 1; l = 0; u = 255;
break;
case X86::BI__builtin_ia32_vperm2f128_pd256:
@@ -4056,7 +4240,7 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
CheckAbsoluteValueFunction(TheCall, FDecl);
CheckMaxUnsignedZero(TheCall, FDecl);
- if (getLangOpts().ObjC1)
+ if (getLangOpts().ObjC)
DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
unsigned CMId = FDecl->getMemoryFunctionKind();
@@ -4312,15 +4496,15 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
++AdjustedNumArgs;
// Check we have the right number of arguments.
if (TheCall->getNumArgs() < AdjustedNumArgs) {
- Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << AdjustedNumArgs << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
+ Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 << AdjustedNumArgs << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
return ExprError();
} else if (TheCall->getNumArgs() > AdjustedNumArgs) {
- Diag(TheCall->getArg(AdjustedNumArgs)->getLocStart(),
+ Diag(TheCall->getArg(AdjustedNumArgs)->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
- << 0 << AdjustedNumArgs << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
+ << 0 << AdjustedNumArgs << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
return ExprError();
}
@@ -4333,8 +4517,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
Ptr = ConvertedPtr.get();
const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
- << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -4343,13 +4527,13 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
QualType ValType = AtomTy; // 'C'
if (IsC11) {
if (!AtomTy->isAtomicType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
- << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (AtomTy.isConstQualified() ||
+ if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
AtomTy.getAddressSpace() == LangAS::opencl_constant) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic)
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_atomic)
<< (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
<< Ptr->getSourceRange();
return ExprError();
@@ -4357,8 +4541,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
ValType = AtomTy->getAs<AtomicType>()->getValueType();
} else if (Form != Load && Form != LoadCopy) {
if (ValType.isConstQualified()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_pointer)
- << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_non_const_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
}
@@ -4368,33 +4552,33 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// gcc does not enforce these rules for GNU atomics, but we do so for sanity.
if (IsAddSub && !ValType->isIntegerType()
&& !ValType->isPointerType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
- << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
if (IsMinMax) {
const BuiltinType *BT = ValType->getAs<BuiltinType>();
if (!BT || (BT->getKind() != BuiltinType::Int &&
BT->getKind() != BuiltinType::UInt)) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_int32_or_ptr);
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_int32_or_ptr);
return ExprError();
}
}
if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
- << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
if (IsC11 && ValType->isPointerType() &&
- RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(),
+ RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
diag::err_incomplete_type)) {
return ExprError();
}
} else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
// For __atomic_*_n operations, the value type must be a scalar integral or
// pointer type which is 1, 2, 4, 8 or 16 bytes in length.
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
- << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -4402,8 +4586,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
!AtomTy->isScalarType()) {
// For GNU atomics, require a trivially-copyable type. This is not part of
// the GNU atomics specification, but we enforce it for sanity.
- Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
- << Ptr->getType() << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_op_needs_trivial_copy)
+ << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -4418,8 +4602,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case Qualifiers::OCL_Autoreleasing:
// FIXME: Can this happen? By this point, ValType should be known
// to be trivially copyable.
- Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
- << ValType << Ptr->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
+ << ValType << Ptr->getSourceRange();
return ExprError();
}
@@ -4457,7 +4641,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
case 0:
// The first argument is always a pointer. It has a fixed type.
// It is always dereferenced, a nullptr is undefined.
- CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
// Nothing else to do: we already know all we want about this pointer.
continue;
case 1:
@@ -4471,14 +4655,14 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
else if (Form == Copy || Form == Xchg) {
if (IsPassedByAddress)
// The value pointer is always dereferenced, a nullptr is undefined.
- CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
Ty = ByValType;
} else if (Form == Arithmetic)
Ty = Context.getPointerDiffType();
else {
Expr *ValArg = TheCall->getArg(i);
// The value pointer is always dereferenced, a nullptr is undefined.
- CheckNonNullArgument(*this, ValArg, DRE->getLocStart());
+ CheckNonNullArgument(*this, ValArg, DRE->getBeginLoc());
LangAS AS = LangAS::Default;
// Keep address space of non-atomic pointer type.
if (const PointerType *PtrTy =
@@ -4493,7 +4677,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
// The third argument to compare_exchange / GNU exchange is the desired
// value, either by-value (for the C11 and *_n variant) or as a pointer.
if (IsPassedByAddress)
- CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getLocStart());
+ CheckNonNullArgument(*this, TheCall->getArg(i), DRE->getBeginLoc());
Ty = ByValType;
break;
case 3:
@@ -4558,7 +4742,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
llvm::APSInt Result(32);
if (SubExprs[1]->isIntegerConstantExpr(Result, Context) &&
!isValidOrderingForOp(Result.getSExtValue(), Op))
- Diag(SubExprs[1]->getLocStart(),
+ Diag(SubExprs[1]->getBeginLoc(),
diag::warn_atomic_op_has_invalid_memory_order)
<< SubExprs[1]->getSourceRange();
}
@@ -4568,25 +4752,26 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
llvm::APSInt Result(32);
if (Scope->isIntegerConstantExpr(Result, Context) &&
!ScopeModel->isValid(Result.getZExtValue())) {
- Diag(Scope->getLocStart(), diag::err_atomic_op_has_invalid_synch_scope)
+ Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
<< Scope->getSourceRange();
}
SubExprs.push_back(Scope);
}
- AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
- SubExprs, ResultType, Op,
- TheCall->getRParenLoc());
+ AtomicExpr *AE =
+ new (Context) AtomicExpr(TheCall->getCallee()->getBeginLoc(), SubExprs,
+ ResultType, Op, TheCall->getRParenLoc());
if ((Op == AtomicExpr::AO__c11_atomic_load ||
Op == AtomicExpr::AO__c11_atomic_store ||
Op == AtomicExpr::AO__opencl_atomic_load ||
Op == AtomicExpr::AO__opencl_atomic_store ) &&
Context.AtomicUsesUnsupportedLibcall(AE))
- Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib)
+ Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
<< ((Op == AtomicExpr::AO__c11_atomic_load ||
- Op == AtomicExpr::AO__opencl_atomic_load)
- ? 0 : 1);
+ Op == AtomicExpr::AO__opencl_atomic_load)
+ ? 0
+ : 1);
return AE;
}
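// Aside: the warn_atomic_op_has_invalid_memory_order path above rejects
// constant orderings that are meaningless for the operation (e.g. a release
// ordering on a load). A standalone sketch of that C11/C++11 rule, using the
// standard enumerators rather than Clang's internal tables:
#include <atomic>

enum class AtomicOp { Load, Store, ReadModifyWrite };

static bool isValidOrderingFor(AtomicOp Op, std::memory_order MO) {
  switch (Op) {
  case AtomicOp::Load: // loads cannot release
    return MO != std::memory_order_release && MO != std::memory_order_acq_rel;
  case AtomicOp::Store: // stores cannot acquire or consume
    return MO != std::memory_order_consume &&
           MO != std::memory_order_acquire && MO != std::memory_order_acq_rel;
  case AtomicOp::ReadModifyWrite: // every ordering is permitted
    return true;
  }
  return true;
}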
@@ -4615,25 +4800,24 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
return false;
}
-/// SemaBuiltinAtomicOverloaded - We have a call to a function like
-/// __sync_fetch_and_add, which is an overloaded function based on the pointer
-/// type of its first argument. The main ActOnCallExpr routines have already
-/// promoted the types of arguments because all of these calls are prototyped as
-/// void(...).
+/// We have a call to a function like __sync_fetch_and_add, which is an
+/// overloaded function based on the pointer type of its first argument.
+/// The main ActOnCallExpr routines have already promoted the types of
+/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
-/// builtins,
+/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
- CallExpr *TheCall = (CallExpr *)TheCallResult.get();
- DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
+ Expr *Callee = TheCall->getCallee();
+ DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
// Ensure that we have at least one argument to do type inference from.
if (TheCall->getNumArgs() < 1) {
- Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1 << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
+ Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
return ExprError();
}
@@ -4651,21 +4835,21 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
return ExprError();
}
QualType ValType = pointerType->getPointeeType();
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType()) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
return ExprError();
}
if (ValType.isConstQualified()) {
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_cannot_be_const)
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
<< FirstArg->getType() << FirstArg->getSourceRange();
return ExprError();
}
@@ -4679,8 +4863,8 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
- Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
- << ValType << FirstArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
+ << ValType << FirstArg->getSourceRange();
return ExprError();
}
@@ -4730,8 +4914,8 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
case 8: SizeIndex = 3; break;
case 16: SizeIndex = 4; break;
default:
- Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
return ExprError();
}
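// Aside: the switch above picks the concrete __sync_*_1/_2/_4/_8/_16 builtin
// from the pointee width; anything else is diagnosed. The same mapping in
// isolation (hypothetical helper name):
static int syncBuiltinSizeIndex(unsigned PointeeBytes) {
  switch (PointeeBytes) {
  case 1:  return 0;  // __sync_*_1
  case 2:  return 1;  // __sync_*_2
  case 4:  return 2;  // __sync_*_4
  case 8:  return 3;  // __sync_*_8
  case 16: return 4;  // __sync_*_16
  default: return -1; // err_atomic_builtin_pointer_size in the real code
  }
}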
@@ -4908,15 +5092,18 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// Now that we know how many fixed arguments we expect, first check that we
// have at least that many.
if (TheCall->getNumArgs() < 1+NumFixed) {
- Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1+NumFixed << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
+ Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1 + NumFixed << TheCall->getNumArgs()
+ << Callee->getSourceRange();
return ExprError();
}
+ Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
+ << Callee->getSourceRange();
+
if (WarnAboutSemanticsChange) {
- Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change)
- << TheCall->getCallee()->getSourceRange();
+ Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
+ << Callee->getSourceRange();
}
// Get the decl for the concrete builtin from this, we can tell what the
@@ -4929,7 +5116,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
else {
// Perform builtin lookup to avoid redeclaring it.
DeclarationName DN(&Context.Idents.get(NewBuiltinName));
- LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName);
+ LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
assert(Res.getFoundDecl());
NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
@@ -4961,8 +5148,6 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
TheCall->setArg(i+1, Arg.get());
}
- ASTContext& Context = this->getASTContext();
-
// Create a new DeclRefExpr to refer to the new decl.
DeclRefExpr* NewDRE = DeclRefExpr::Create(
Context,
@@ -5026,7 +5211,7 @@ ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
if (!pointerType) {
- Diag(DRE->getLocStart(), diag::err_nontemporal_builtin_must_be_pointer)
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
<< PointerArg->getType() << PointerArg->getSourceRange();
return ExprError();
}
@@ -5038,7 +5223,7 @@ ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType() && !ValType->isFloatingType() &&
!ValType->isVectorType()) {
- Diag(DRE->getLocStart(),
+ Diag(DRE->getBeginLoc(),
diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
<< PointerArg->getType() << PointerArg->getSourceRange();
return ExprError();
@@ -5070,8 +5255,8 @@ bool Sema::CheckObjCString(Expr *Arg) {
StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
if (!Literal || !Literal->isAscii()) {
- Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
- << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
+ << Arg->getSourceRange();
return true;
}
@@ -5087,8 +5272,8 @@ bool Sema::CheckObjCString(Expr *Arg) {
ToPtr + NumBytes, llvm::strictConversion);
// Check for conversion failure.
if (Result != llvm::conversionOK)
- Diag(Arg->getLocStart(),
- diag::warn_cfstring_truncated) << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
+ << Arg->getSourceRange();
}
return false;
}
@@ -5106,7 +5291,7 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
return ExprError(
- Diag(Arg->getLocStart(), diag::err_os_log_format_not_string_constant)
+ Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
<< Arg->getSourceRange());
}
@@ -5133,7 +5318,7 @@ static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
if (IsMSVAStart) {
// Don't allow this in System V ABI functions.
if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
- return S.Diag(Fn->getLocStart(),
+ return S.Diag(Fn->getBeginLoc(),
diag::err_ms_va_start_used_in_sysv_function);
} else {
// On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
@@ -5142,7 +5327,7 @@ static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
// System V ABI functions on Windows.)
if ((IsWindows && CC == CC_X86_64SysV) ||
(!IsWindows && CC == CC_Win64))
- return S.Diag(Fn->getLocStart(),
+ return S.Diag(Fn->getBeginLoc(),
diag::err_va_start_used_in_wrong_abi_function)
<< !IsWindows;
}
@@ -5150,7 +5335,7 @@ static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
}
if (IsMSVAStart)
- return S.Diag(Fn->getLocStart(), diag::err_builtin_x64_aarch64_only);
+ return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
return false;
}
@@ -5173,16 +5358,16 @@ static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
Params = MD->parameters();
} else if (isa<CapturedDecl>(Caller)) {
// We don't support va_start in a CapturedDecl.
- S.Diag(Fn->getLocStart(), diag::err_va_start_captured_stmt);
+ S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
return true;
} else {
// This must be some other declcontext that parses exprs.
- S.Diag(Fn->getLocStart(), diag::err_va_start_outside_function);
+ S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
return true;
}
if (!IsVariadic) {
- S.Diag(Fn->getLocStart(), diag::err_va_start_fixed_function);
+ S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
return true;
}
@@ -5202,19 +5387,19 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
return true;
if (TheCall->getNumArgs() > 2) {
- Diag(TheCall->getArg(2)->getLocStart(),
+ Diag(TheCall->getArg(2)->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs()
- << Fn->getSourceRange()
- << SourceRange(TheCall->getArg(2)->getLocStart(),
- (*(TheCall->arg_end()-1))->getLocEnd());
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << Fn->getSourceRange()
+ << SourceRange(TheCall->getArg(2)->getBeginLoc(),
+ (*(TheCall->arg_end() - 1))->getEndLoc());
return true;
}
if (TheCall->getNumArgs() < 2) {
- return Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs();
+ return Diag(TheCall->getEndLoc(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs();
}
// Type-check the first argument normally.
@@ -5249,7 +5434,7 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
}
if (!SecondArgIsLastNamedArgument)
- Diag(TheCall->getArg(1)->getLocStart(),
+ Diag(TheCall->getArg(1)->getBeginLoc(),
diag::warn_second_arg_of_va_start_not_last_named_param);
else if (IsCRegister || Type->isReferenceType() ||
Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
@@ -5266,7 +5451,7 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
unsigned Reason = 0;
if (Type->isReferenceType()) Reason = 1;
else if (IsCRegister) Reason = 2;
- Diag(Arg->getLocStart(), diag::warn_va_start_type_is_undefined) << Reason;
+ Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
Diag(ParamLoc, diag::note_parameter_type) << Type;
}
@@ -5281,7 +5466,7 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
Expr *Func = Call->getCallee();
if (Call->getNumArgs() < 3)
- return Diag(Call->getLocEnd(),
+ return Diag(Call->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
<< 0 /*function call*/ << 3 << Call->getNumArgs();
@@ -5305,20 +5490,18 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
Context.getPointerType(Context.CharTy.withConst());
if (!Arg1Ty->isPointerType() ||
Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
- Diag(Arg1->getLocStart(), diag::err_typecheck_convert_incompatible)
- << Arg1->getType() << ConstCharPtrTy
- << 1 /* different class */
- << 0 /* qualifier difference */
- << 3 /* parameter mismatch */
+ Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
+ << 0 /* qualifier difference */
+ << 3 /* parameter mismatch */
<< 2 << Arg1->getType() << ConstCharPtrTy;
const QualType SizeTy = Context.getSizeType();
if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
- Diag(Arg2->getLocStart(), diag::err_typecheck_convert_incompatible)
- << Arg2->getType() << SizeTy
- << 1 /* different class */
- << 0 /* qualifier difference */
- << 3 /* parameter mismatch */
+ Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
+ << Arg2->getType() << SizeTy << 1 /* different class */
+ << 0 /* qualifier difference */
+ << 3 /* parameter mismatch */
<< 3 << Arg2->getType() << SizeTy;
return false;
@@ -5328,14 +5511,14 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
if (TheCall->getNumArgs() < 2)
- return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << 2 << TheCall->getNumArgs()/*function call*/;
+ return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 << 2 << TheCall->getNumArgs() /*function call*/;
if (TheCall->getNumArgs() > 2)
- return Diag(TheCall->getArg(2)->getLocStart(),
+ return Diag(TheCall->getArg(2)->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << 2 << TheCall->getNumArgs()
- << SourceRange(TheCall->getArg(2)->getLocStart(),
- (*(TheCall->arg_end()-1))->getLocEnd());
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(2)->getBeginLoc(),
+ (*(TheCall->arg_end() - 1))->getEndLoc());
ExprResult OrigArg0 = TheCall->getArg(0);
ExprResult OrigArg1 = TheCall->getArg(1);
@@ -5358,10 +5541,11 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
// If the common type isn't a real floating type, then the arguments were
// invalid for this operation.
if (Res.isNull() || !Res->isRealFloatingType())
- return Diag(OrigArg0.get()->getLocStart(),
+ return Diag(OrigArg0.get()->getBeginLoc(),
diag::err_typecheck_call_invalid_ordered_compare)
- << OrigArg0.get()->getType() << OrigArg1.get()->getType()
- << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd());
+ << OrigArg0.get()->getType() << OrigArg1.get()->getType()
+ << SourceRange(OrigArg0.get()->getBeginLoc(),
+ OrigArg1.get()->getEndLoc());
return false;
}
@@ -5372,14 +5556,14 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
if (TheCall->getNumArgs() < NumArgs)
- return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
- << 0 << NumArgs << TheCall->getNumArgs()/*function call*/;
+ return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
if (TheCall->getNumArgs() > NumArgs)
- return Diag(TheCall->getArg(NumArgs)->getLocStart(),
+ return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
- << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
- (*(TheCall->arg_end()-1))->getLocEnd());
+ << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
+ (*(TheCall->arg_end() - 1))->getEndLoc());
Expr *OrigArg = TheCall->getArg(NumArgs-1);
@@ -5388,9 +5572,9 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
// This operation requires a non-_Complex floating-point number.
if (!OrigArg->getType()->isRealFloatingType())
- return Diag(OrigArg->getLocStart(),
+ return Diag(OrigArg->getBeginLoc(),
diag::err_typecheck_call_invalid_unary_fp)
- << OrigArg->getType() << OrigArg->getSourceRange();
+ << OrigArg->getType() << OrigArg->getSourceRange();
// If this is an implicit conversion from float -> float, double, or
// long double, remove it.
@@ -5424,13 +5608,13 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
unsigned ExpectedNumArgs = 3;
if (TheCall->getNumArgs() < ExpectedNumArgs)
- return Diag(TheCall->getLocEnd(),
+ return Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
+ << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
<< TheCall->getSourceRange();
if (TheCall->getNumArgs() > ExpectedNumArgs)
- return Diag(TheCall->getLocEnd(),
+ return Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_many_args_at_most)
<< 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
<< TheCall->getSourceRange();
@@ -5438,31 +5622,31 @@ bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
// Check the third argument is a compile time constant
llvm::APSInt Value;
if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
- return Diag(TheCall->getLocStart(),
+ return Diag(TheCall->getBeginLoc(),
diag::err_vsx_builtin_nonconstant_argument)
<< 3 /* argument index */ << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(2)->getLocStart(),
- TheCall->getArg(2)->getLocEnd());
+ << SourceRange(TheCall->getArg(2)->getBeginLoc(),
+ TheCall->getArg(2)->getEndLoc());
QualType Arg1Ty = TheCall->getArg(0)->getType();
QualType Arg2Ty = TheCall->getArg(1)->getType();
// Check the type of argument 1 and argument 2 are vectors.
- SourceLocation BuiltinLoc = TheCall->getLocStart();
+ SourceLocation BuiltinLoc = TheCall->getBeginLoc();
if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
(!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
<< TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getLocStart(),
- TheCall->getArg(1)->getLocEnd());
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
}
// Check the first two arguments are the same type.
if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getLocStart(),
- TheCall->getArg(1)->getLocEnd());
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc());
}
// When default clang type checking is turned off and the customized type
@@ -5477,7 +5661,7 @@ bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (TheCall->getNumArgs() < 2)
- return ExprError(Diag(TheCall->getLocEnd(),
+ return ExprError(Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
<< 0 /*function call*/ << 2 << TheCall->getNumArgs()
<< TheCall->getSourceRange());
@@ -5494,11 +5678,11 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
QualType RHSType = TheCall->getArg(1)->getType();
if (!LHSType->isVectorType() || !RHSType->isVectorType())
- return ExprError(Diag(TheCall->getLocStart(),
- diag::err_vec_builtin_non_vector)
- << TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getLocStart(),
- TheCall->getArg(1)->getLocEnd()));
+ return ExprError(
+ Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc()));
numElements = LHSType->getAs<VectorType>()->getNumElements();
unsigned numResElements = TheCall->getNumArgs() - 2;
@@ -5509,17 +5693,17 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (TheCall->getNumArgs() == 2) {
if (!RHSType->hasIntegerRepresentation() ||
RHSType->getAs<VectorType>()->getNumElements() != numElements)
- return ExprError(Diag(TheCall->getLocStart(),
+ return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(1)->getLocStart(),
- TheCall->getArg(1)->getLocEnd()));
+ << SourceRange(TheCall->getArg(1)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc()));
} else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
- return ExprError(Diag(TheCall->getLocStart(),
+ return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee()
- << SourceRange(TheCall->getArg(0)->getLocStart(),
- TheCall->getArg(1)->getLocEnd()));
+ << SourceRange(TheCall->getArg(0)->getBeginLoc(),
+ TheCall->getArg(1)->getEndLoc()));
} else if (numElements != numResElements) {
QualType eltType = LHSType->getAs<VectorType>()->getElementType();
resType = Context.getVectorType(eltType, numResElements,
@@ -5534,7 +5718,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
llvm::APSInt Result(32);
if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
- return ExprError(Diag(TheCall->getLocStart(),
+ return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_shufflevector_nonconstant_argument)
<< TheCall->getArg(i)->getSourceRange());
@@ -5543,7 +5727,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
continue;
if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
- return ExprError(Diag(TheCall->getLocStart(),
+ return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_shufflevector_argument_too_large)
<< TheCall->getArg(i)->getSourceRange());
}
@@ -5556,7 +5740,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
}
return new (Context) ShuffleVectorExpr(Context, exprs, resType,
- TheCall->getCallee()->getLocStart(),
+ TheCall->getCallee()->getBeginLoc(),
TheCall->getRParenLoc());
}
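// Aside: each constant mask element passed to __builtin_shufflevector must lie
// in [-1, 2*N-1], where N is the source vector length and -1 marks an undefined
// result lane. A standalone sketch of that validity check (hypothetical name):
#include <cstdint>

static bool isValidShuffleIndex(int64_t Index, unsigned NumSrcElements) {
  if (Index == -1) // explicit "don't care" lane, lowered to undef
    return true;
  return Index >= 0 && static_cast<uint64_t>(Index) < 2ull * NumSrcElements;
}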
@@ -5597,10 +5781,9 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
if (NumArgs > 3)
- return Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_many_args_at_most)
- << 0 /*function call*/ << 3 << NumArgs
- << TheCall->getSourceRange();
+ return Diag(TheCall->getEndLoc(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
// Argument 0 is checked for us and the remaining arguments must be
// constant integers.
@@ -5619,9 +5802,9 @@ bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
if (Arg->isInstantiationDependent()) return false;
if (Arg->HasSideEffects(Context))
- Diag(Arg->getLocStart(), diag::warn_assume_side_effects)
- << Arg->getSourceRange()
- << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
+ Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
+ << Arg->getSourceRange()
+ << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
return false;
}
@@ -5637,26 +5820,24 @@ bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
if (const auto *UE =
dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
- if (UE->getKind() == UETT_AlignOf)
- Diag(TheCall->getLocStart(), diag::warn_alloca_align_alignof)
- << Arg->getSourceRange();
+ if (UE->getKind() == UETT_AlignOf ||
+ UE->getKind() == UETT_PreferredAlignOf)
+ Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
+ << Arg->getSourceRange();
llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
if (!Result.isPowerOf2())
- return Diag(TheCall->getLocStart(),
- diag::err_alignment_not_power_of_two)
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
+ << Arg->getSourceRange();
if (Result < Context.getCharWidth())
- return Diag(TheCall->getLocStart(), diag::err_alignment_too_small)
- << (unsigned)Context.getCharWidth()
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
+ << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
if (Result > std::numeric_limits<int32_t>::max())
- return Diag(TheCall->getLocStart(), diag::err_alignment_too_big)
- << std::numeric_limits<int32_t>::max()
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
+ << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
}
return false;
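// Aside: __builtin_alloca_with_align takes its alignment in bits, and the
// checks above require a power of two no smaller than the target's char width
// and no larger than INT32_MAX. The same predicate as a standalone sketch
// (assuming an 8-bit char):
#include <climits>
#include <cstdint>

static bool isValidAllocaAlignInBits(uint64_t AlignBits) {
  const bool PowerOfTwo = AlignBits != 0 && (AlignBits & (AlignBits - 1)) == 0;
  return PowerOfTwo && AlignBits >= CHAR_BIT && AlignBits <= INT32_MAX;
}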
@@ -5668,10 +5849,9 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
if (NumArgs > 3)
- return Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_many_args_at_most)
- << 0 /*function call*/ << 3 << NumArgs
- << TheCall->getSourceRange();
+ return Diag(TheCall->getEndLoc(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
// The alignment must be a constant integer.
Expr *Arg = TheCall->getArg(1);
@@ -5683,9 +5863,8 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
return true;
if (!Result.isPowerOf2())
- return Diag(TheCall->getLocStart(),
- diag::err_alignment_not_power_of_two)
- << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
+ << Arg->getSourceRange();
}
if (NumArgs > 2) {
@@ -5708,12 +5887,12 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
unsigned NumArgs = TheCall->getNumArgs();
unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
if (NumArgs < NumRequiredArgs) {
- return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /* function call */ << NumRequiredArgs << NumArgs
<< TheCall->getSourceRange();
}
if (NumArgs >= NumRequiredArgs + 0x100) {
- return Diag(TheCall->getLocEnd(),
+ return Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_many_args_at_most)
<< 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
<< TheCall->getSourceRange();
@@ -5751,7 +5930,7 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
return true;
CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
if (ArgSize.getQuantity() >= 0x100) {
- return Diag(Arg.get()->getLocEnd(), diag::err_os_log_argument_too_big)
+ return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
<< i << (int)ArgSize.getQuantity() << 0xff
<< TheCall->getSourceRange();
}
@@ -5766,7 +5945,7 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
bool Success = CheckFormatArguments(
Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
- VariadicFunction, TheCall->getLocStart(), SourceRange(),
+ VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
CheckedVarArgs);
if (!Success)
return true;
@@ -5791,8 +5970,8 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
if (!Arg->isIntegerConstantExpr(Result, Context))
- return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type)
- << FDecl->getDeclName() << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
+ << FDecl->getDeclName() << Arg->getSourceRange();
return false;
}
@@ -5814,15 +5993,15 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
if (RangeIsError)
- return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
<< Result.toString(10) << Low << High << Arg->getSourceRange();
else
// Defer the warning until we know if the code will be emitted so that
// dead code can ignore this.
- DiagRuntimeBehavior(TheCall->getLocStart(), TheCall,
- PDiag(diag::warn_argument_invalid_range)
- << Result.toString(10) << Low << High
- << Arg->getSourceRange());
+ DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
+ PDiag(diag::warn_argument_invalid_range)
+ << Result.toString(10) << Low << High
+ << Arg->getSourceRange());
}
return false;
@@ -5844,8 +6023,8 @@ bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
return true;
if (Result.getSExtValue() % Num != 0)
- return Diag(TheCall->getLocStart(), diag::err_argument_not_multiple)
- << Num << Arg->getSourceRange();
+ return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
+ << Num << Arg->getSourceRange();
return false;
}
@@ -5876,7 +6055,7 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
// Check if the argument is a string literal.
if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
- return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal)
+ return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
<< Arg->getSourceRange();
// Check the type of special register given.
@@ -5885,7 +6064,7 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
Reg.split(Fields, ":");
if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
- return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+ return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
<< Arg->getSourceRange();
// If the string is the name of a register then we cannot check that it is
@@ -5927,7 +6106,7 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
}
if (!ValidString)
- return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg)
+ return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
<< Arg->getSourceRange();
} else if (IsAArch64Builtin && Fields.size() == 1) {
// If the register name is one of those that appear in the condition below
@@ -5955,8 +6134,8 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
if (!Context.getTargetInfo().hasSjLjLowering())
- return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_unsupported)
- << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
+ return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
+ << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
Expr *Arg = TheCall->getArg(1);
llvm::APSInt Result;
@@ -5966,8 +6145,8 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
return true;
if (Result != 1)
- return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
- << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
+ << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
return false;
}
@@ -5976,8 +6155,8 @@ bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
if (!Context.getTargetInfo().hasSjLjLowering())
- return Diag(TheCall->getLocStart(), diag::err_builtin_setjmp_unsupported)
- << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd());
+ return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
+ << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
return false;
}
@@ -6121,11 +6300,11 @@ class FormatStringLiteral {
StartToken, StartTokenByteOffset);
}
- SourceLocation getLocStart() const LLVM_READONLY {
- return FExpr->getLocStart().getLocWithOffset(Offset);
+ SourceLocation getBeginLoc() const LLVM_READONLY {
+ return FExpr->getBeginLoc().getLocWithOffset(Offset);
}
- SourceLocation getLocEnd() const LLVM_READONLY { return FExpr->getLocEnd(); }
+ SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};
} // namespace
@@ -6377,13 +6556,12 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return SLCT_NotALiteral;
}
case Stmt::BinaryOperatorClass: {
- llvm::APSInt LResult;
- llvm::APSInt RResult;
-
const BinaryOperator *BinOp = cast<BinaryOperator>(E);
// A string literal + an int offset is still a string literal.
if (BinOp->isAdditiveOp()) {
+ Expr::EvalResult LResult, RResult;
+
bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
@@ -6392,12 +6570,12 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (LIsInt) {
if (BinOpKind == BO_Add) {
- sumOffsets(Offset, LResult, BinOpKind, RIsInt);
+ sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
E = BinOp->getRHS();
goto tryAgain;
}
} else {
- sumOffsets(Offset, RResult, BinOpKind, RIsInt);
+ sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
E = BinOp->getLHS();
goto tryAgain;
}
@@ -6410,9 +6588,10 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
- llvm::APSInt IndexResult;
+ Expr::EvalResult IndexResult;
if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
- sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
+ sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
+ /*RHS is int*/ true);
E = ASE->getBase();
goto tryAgain;
}
@@ -6511,7 +6690,7 @@ bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
// format is either NSString or CFString. This is a hack to prevent
// diag when using the NSLocalizedString and CFCopyLocalizedString macros
// which are usually used in place of NS and CF string literals.
- SourceLocation FormatLoc = Args[format_idx]->getLocStart();
+ SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
return false;
@@ -6831,7 +7010,7 @@ void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
if (!ArgExpr)
return;
- SourceLocation Loc = ArgExpr->getLocStart();
+ SourceLocation Loc = ArgExpr->getBeginLoc();
if (S.getSourceManager().isInSystemMacro(Loc))
return;
@@ -7029,6 +7208,8 @@ public:
const char *startSpecifier,
unsigned specifierLen) override;
+ void handleInvalidMaskType(StringRef MaskType) override;
+
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen) override;
@@ -7080,6 +7261,10 @@ bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
CS.getStart(), CS.getLength());
}
+void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
+ S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
+}
+
bool CheckPrintfHandler::HandleAmount(
const analyze_format_string::OptionalAmount &Amt,
unsigned k, const char *startSpecifier,
@@ -7276,10 +7461,9 @@ bool CheckPrintfHandler::checkForCStrMembers(
if (Method->getMinRequiredArguments() == 0 &&
AT.matchesType(S.Context, Method->getReturnType())) {
// FIXME: Suggest parens if the expression needs them.
- SourceLocation EndLoc = S.getLocForEndOfToken(E->getLocEnd());
- S.Diag(E->getLocStart(), diag::note_printf_c_str)
- << "c_str()"
- << FixItHint::CreateInsertion(EndLoc, ".c_str()");
+ SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
+ S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
+ << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
return true;
}
}
@@ -7353,22 +7537,22 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
ArgType(S.Context.IntTy) : ArgType::CPointerTy;
if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
EmitFormatDiagnostic(
- S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
- << false << Ex->getSourceRange(),
- Ex->getLocStart(), /*IsStringLocation*/false,
- getSpecifierRange(startSpecifier, specifierLen));
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << false << Ex->getSourceRange(),
+ Ex->getBeginLoc(), /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
// Type check the second argument (char * for both %b and %D)
Ex = getDataArg(argIndex + 1);
const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
EmitFormatDiagnostic(
- S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
- << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
- << false << Ex->getSourceRange(),
- Ex->getLocStart(), /*IsStringLocation*/false,
- getSpecifierRange(startSpecifier, specifierLen));
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << false << Ex->getSourceRange(),
+ Ex->getBeginLoc(), /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
return true;
}
@@ -7585,6 +7769,30 @@ shouldNotPrintDirectly(const ASTContext &Context,
return std::make_pair(QualType(), StringRef());
}
+/// Return true if \p ICE is an implicit argument promotion of an arithmetic
+/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
+/// type do not count.
+static bool
+isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
+ QualType From = ICE->getSubExpr()->getType();
+ QualType To = ICE->getType();
+ // It's an integer promotion if the destination type is the promoted
+ // source type.
+ if (ICE->getCastKind() == CK_IntegralCast &&
+ From->isPromotableIntegerType() &&
+ S.Context.getPromotedIntegerType(From) == To)
+ return true;
+ // Look through vector types, since we do default argument promotion for
+ // those in OpenCL.
+ if (const auto *VecTy = From->getAs<ExtVectorType>())
+ From = VecTy->getElementType();
+ if (const auto *VecTy = To->getAs<ExtVectorType>())
+ To = VecTy->getElementType();
+ // It's a floating promotion if the source type is a lower rank.
+ return ICE->getCastKind() == CK_FloatingCast &&
+ S.Context.getFloatingTypeOrder(From, To) < 0;
+}
+
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
@@ -7612,11 +7820,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// Look through argument promotions for our error message's reported type.
// This includes the integral and floating promotions, but excludes array
- // and function pointer decay; seeing that an argument intended to be a
- // string has type 'char [6]' is probably more confusing than 'char *'.
+ // and function pointer decay (seeing that an argument intended to be a
+ // string has type 'char [6]' is probably more confusing than 'char *') and
+ // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- if (ICE->getCastKind() == CK_IntegralCast ||
- ICE->getCastKind() == CK_FloatingCast) {
+ if (isArithmeticArgumentPromotion(S, ICE)) {
E = ICE->getSubExpr();
ExprTy = E->getType();
@@ -7666,7 +7874,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
return true;
}
- LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getLocStart(),
+ LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
Sema::LookupOrdinaryName);
if (S.LookupName(Result, S.getCurScope())) {
NamedDecl *ND = Result.getFoundDecl();
@@ -7718,7 +7926,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
EmitFormatDiagnostic(S.PDiag(Diag)
<< AT.getRepresentativeTypeName(S.Context)
<< IntendedTy << IsEnum << E->getSourceRange(),
- E->getLocStart(),
+ E->getBeginLoc(),
/*IsStringLocation*/ false, SpecRange,
FixItHint::CreateReplacement(SpecRange, os.str()));
} else {
@@ -7747,15 +7955,15 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
} else if (!requiresParensToAddCast(E)) {
// If the expression has high enough precedence,
// just write the C-style cast.
- Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
- CastFix.str()));
+ Hints.push_back(
+ FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
} else {
// Otherwise, add parens around the expression as well as the cast.
CastFix << "(";
- Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(),
- CastFix.str()));
+ Hints.push_back(
+ FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
- SourceLocation After = S.getLocForEndOfToken(E->getLocEnd());
+ SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
Hints.push_back(FixItHint::CreateInsertion(After, ")"));
}
@@ -7773,18 +7981,17 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
: diag::warn_format_argument_needs_cast;
EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
<< E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation=*/false,
+ E->getBeginLoc(), /*IsStringLocation=*/false,
SpecRange, Hints);
} else {
// In this case, the expression could be printed using a different
// specifier, but we've decided that the specifier is probably correct
// and we should cast instead. Just use the normal warning message.
EmitFormatDiagnostic(
- S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
- << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/false,
- SpecRange, Hints);
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
+ << E->getSourceRange(),
+ E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
}
}
} else {
@@ -7804,41 +8011,34 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
EmitFormatDiagnostic(
S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
<< IsEnum << CSR << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/ false, CSR);
+ E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
break;
}
case Sema::VAK_Undefined:
case Sema::VAK_MSVCUndefined:
- EmitFormatDiagnostic(
- S.PDiag(diag::warn_non_pod_vararg_with_format_string)
- << S.getLangOpts().CPlusPlus11
- << ExprTy
- << CallType
- << AT.getRepresentativeTypeName(S.Context)
- << CSR
- << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/false, CSR);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
+ << S.getLangOpts().CPlusPlus11 << ExprTy
+ << CallType
+ << AT.getRepresentativeTypeName(S.Context) << CSR
+ << E->getSourceRange(),
+ E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
checkForCStrMembers(AT, E);
break;
case Sema::VAK_Invalid:
if (ExprTy->isObjCObjectType())
EmitFormatDiagnostic(
- S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
- << S.getLangOpts().CPlusPlus11
- << ExprTy
- << CallType
- << AT.getRepresentativeTypeName(S.Context)
- << CSR
- << E->getSourceRange(),
- E->getLocStart(), /*IsStringLocation*/false, CSR);
+ S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
+ << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
+ << AT.getRepresentativeTypeName(S.Context) << CSR
+ << E->getSourceRange(),
+ E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
else
// FIXME: If this is an initializer list, suggest removing the braces
// or inserting a cast to the target type.
- S.Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg_format)
- << isa<InitListExpr>(E) << ExprTy << CallType
- << AT.getRepresentativeTypeName(S.Context)
- << E->getSourceRange();
+ S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
+ << isa<InitListExpr>(E) << ExprTy << CallType
+ << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
break;
}
@@ -8008,7 +8208,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
EmitFormatDiagnostic(
S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
<< Ex->getType() << false << Ex->getSourceRange(),
- Ex->getLocStart(),
+ Ex->getBeginLoc(),
/*IsStringLocation*/ false,
getSpecifierRange(startSpecifier, specifierLen),
FixItHint::CreateReplacement(
@@ -8017,7 +8217,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
EmitFormatDiagnostic(S.PDiag(Diag)
<< AT.getRepresentativeTypeName(S.Context)
<< Ex->getType() << false << Ex->getSourceRange(),
- Ex->getLocStart(),
+ Ex->getBeginLoc(),
/*IsStringLocation*/ false,
getSpecifierRange(startSpecifier, specifierLen));
}
@@ -8038,9 +8238,9 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
// CHECK: is the format string a wide literal?
if (!FExpr->isAscii() && !FExpr->isUTF8()) {
CheckFormatHandler::EmitFormatDiagnostic(
- S, inFunctionCall, Args[format_idx],
- S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
- /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ S, inFunctionCall, Args[format_idx],
+ S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
+ /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
return;
}
@@ -8062,7 +8262,7 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
CheckFormatHandler::EmitFormatDiagnostic(
S, inFunctionCall, Args[format_idx],
S.PDiag(diag::warn_printf_format_string_not_null_terminated),
- FExpr->getLocStart(),
+ FExpr->getBeginLoc(),
/*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
return;
}
@@ -8070,9 +8270,9 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
// CHECK: empty format string?
if (StrLen == 0 && numDataArgs > 0) {
CheckFormatHandler::EmitFormatDiagnostic(
- S, inFunctionCall, Args[format_idx],
- S.PDiag(diag::warn_empty_format_string), FExpr->getLocStart(),
- /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ S, inFunctionCall, Args[format_idx],
+ S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
+ /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
return;
}
@@ -8587,8 +8787,9 @@ static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
<< SizeRange << FnName;
S.Diag(FnLoc, diag::note_memsize_comparison_paren)
- << FnName << FixItHint::CreateInsertion(
- S.getLocForEndOfToken(Size->getLHS()->getLocEnd()), ")")
+ << FnName
+ << FixItHint::CreateInsertion(
+ S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
<< FixItHint::CreateRemoval(RParenLoc);
S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
<< FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
@@ -8853,7 +9054,7 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
- Call->getLocStart(), Call->getRParenLoc()))
+ Call->getBeginLoc(), Call->getRParenLoc()))
return;
// Catch cases like 'memset(buf, sizeof(buf), 0)'.
@@ -9075,7 +9276,7 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
const Expr *CompareWithSrc = nullptr;
if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
- Call->getLocStart(), Call->getRParenLoc()))
+ Call->getBeginLoc(), Call->getRParenLoc()))
return;
// Look for 'strlcpy(dst, x, sizeof(x))'
@@ -9107,8 +9308,8 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
return;
const Expr *OriginalSizeArg = Call->getArg(2);
- Diag(CompareWithSrcDRE->getLocStart(), diag::warn_strlcpycat_wrong_size)
- << OriginalSizeArg->getSourceRange() << FnName;
+ Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
+ << OriginalSizeArg->getSourceRange() << FnName;
// Output a FIXIT hint if the destination is an array (rather than a
// pointer to an array). This could be enhanced to handle some
@@ -9124,9 +9325,9 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
DstArg->printPretty(OS, nullptr, getPrintingPolicy());
OS << ")";
- Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size)
- << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
- OS.str());
+ Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
+ << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
+ OS.str());
}
/// Check if two expressions refer to the same declaration.
@@ -9159,7 +9360,7 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
- if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getLocStart(),
+ if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
CE->getRParenLoc()))
return;
@@ -9191,7 +9392,7 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
return;
// Generate the diagnostic.
- SourceLocation SL = LenArg->getLocStart();
+ SourceLocation SL = LenArg->getBeginLoc();
SourceRange SR = LenArg->getSourceRange();
SourceManager &SM = getSourceManager();
@@ -9738,7 +9939,7 @@ static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
return true;
// Suppress cases where the '0' value is expanded from a macro.
- if (E->getLocStart().isMacroID())
+ if (E->getBeginLoc().isMacroID())
return true;
return false;
@@ -10159,8 +10360,8 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
Expr *OriginalInit = Init->IgnoreParenImpCasts();
unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
- llvm::APSInt Value;
- if (!OriginalInit->EvaluateAsInt(Value, S.Context,
+ Expr::EvalResult Result;
+ if (!OriginalInit->EvaluateAsInt(Result, S.Context,
Expr::SE_AllowSideEffects)) {
// The RHS is not constant. If the RHS has an enum type, make sure the
// bitfield is wide enough to hold all the values of the enum without
@@ -10216,6 +10417,8 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
return false;
}
+ llvm::APSInt Value = Result.Val.getInt();
+
unsigned OriginalWidth = Value.getBitWidth();
if (!Value.isSigned() || Value.isNegative())
@@ -10268,6 +10471,10 @@ static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
}
AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+
+ // Diagnose implicitly sequentially-consistent atomic assignment.
+ if (E->getLHS()->getType()->isAtomicType())
+ S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}
/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
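// Illustrative sketch of the code pattern the new warn_atomic_implicit_seq_cst
// diagnostic (added here and at several sites below) is aimed at: plain reads,
// writes, and compound assignments on _Atomic objects imply sequentially
// consistent atomic operations. Assumes Clang's acceptance of C11 _Atomic in
// C++; the same snippet also compiles as C11.
_Atomic(int) Counter;

void bump(int Delta) {
  Counter += Delta;    // compound assignment on an atomic l-value
  int Seen = Counter;  // implicit sequentially-consistent load
  (void)Seen;
}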
@@ -10292,33 +10499,6 @@ static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
}
-/// Analyze the given compound assignment for the possible losing of
-/// floating-point precision.
-static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
- assert(isa<CompoundAssignOperator>(E) &&
- "Must be compound assignment operation");
- // Recurse on the LHS and RHS in here
- AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
- AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
-
- // Now check the outermost expression
- const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
- const auto *RBT = cast<CompoundAssignOperator>(E)
- ->getComputationResultType()
- ->getAs<BuiltinType>();
-
- // If both source and target are floating points.
- if (ResultBT && ResultBT->isFloatingPoint() && RBT && RBT->isFloatingPoint())
- // Builtin FP kinds are ordered by increasing FP rank.
- if (ResultBT->getKind() < RBT->getKind())
- // We don't want to warn for system macro.
- if (!S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
- // warn about dropping FP rank.
- DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(),
- E->getOperatorLoc(),
- diag::warn_impcast_float_result_precision);
-}
-
/// Diagnose an implicit cast from a floating point value to an integer value.
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
SourceLocation CContext) {
@@ -10421,6 +10601,42 @@ static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
}
}
+/// Analyze the given compound assignment for the possible losing of
+/// floating-point precision.
+static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
+ assert(isa<CompoundAssignOperator>(E) &&
+ "Must be compound assignment operation");
+ // Recurse on the LHS and RHS in here
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+
+ if (E->getLHS()->getType()->isAtomicType())
+ S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
+
+ // Now check the outermost expression
+ const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
+ const auto *RBT = cast<CompoundAssignOperator>(E)
+ ->getComputationResultType()
+ ->getAs<BuiltinType>();
+
+ // The checks below assume the source is floating point.
+ if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
+
+ // If source is floating point but target is not.
+ if (!ResultBT->isFloatingPoint())
+ return DiagnoseFloatingImpCast(S, E, E->getRHS()->getType(),
+ E->getExprLoc());
+
+ // If both source and target are floating points.
+ // Builtin FP kinds are ordered by increasing FP rank.
+ if (ResultBT->getKind() < RBT->getKind() &&
+ // We don't want to warn for system macro.
+ !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
+ // warn about dropping FP rank.
+ DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
+ diag::warn_impcast_float_result_precision);
+}
+
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
IntRange Range) {
if (!Range.Width) return "0";
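// Illustrative sketch of the extra case the relocated AnalyzeCompoundAssignment
// now reports: the compound assignment below is computed in double but stored
// back into an int, so the fractional part of the result is silently dropped.
void scale(int &Total, double Factor) {
  Total *= Factor;  // computed as double, then truncated back to int
}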
@@ -10545,10 +10761,9 @@ static void checkObjCCollectionLiteralElement(Sema &S,
ElementResult,
false, false)
!= Sema::Compatible) {
- S.Diag(Element->getLocStart(),
- diag::warn_objc_collection_literal_element)
- << ElementType << ElementKind << TargetElementType
- << Element->getSourceRange();
+ S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
+ << ElementType << ElementKind << TargetElementType
+ << Element->getSourceRange();
}
if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
@@ -10624,7 +10839,7 @@ static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
// to fill all the bits, even if there is a sign change.
if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
const char FirstLiteralCharacter =
- S.getSourceManager().getCharacterData(IntLit->getLocStart())[0];
+ S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
if (FirstLiteralCharacter == '0')
return false;
}
@@ -10659,6 +10874,9 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
if (CC.isInvalid())
return;
+ if (Source->isAtomicType())
+ S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
+
// Diagnose implicit casts to bool.
if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
if (isa<StringLiteral>(E))
@@ -10813,8 +11031,11 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
if (SourceRange.Width > TargetRange.Width) {
// If the source is a constant, use a default-on diagnostic.
// TODO: this should happen for bitfield stores, too.
- llvm::APSInt Value(32);
- if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects)) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Value(32);
+ Value = Result.Val.getInt();
+
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -10839,15 +11060,29 @@ CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC,
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
}
+ if (TargetRange.Width > SourceRange.Width) {
+ if (auto *UO = dyn_cast<UnaryOperator>(E))
+ if (UO->getOpcode() == UO_Minus)
+ if (Source->isUnsignedIntegerType()) {
+ if (Target->isUnsignedIntegerType())
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_high_order_zero_bits);
+ if (Target->isSignedIntegerType())
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_nonnegative_result);
+ }
+ }
+
if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
SourceRange.NonNegative && Source->isSignedIntegerType()) {
// Warn when doing a signed to signed conversion, warn if the positive
// source value is exactly the width of the target type, which will
// cause a negative value to be stored.
- llvm::APSInt Value;
- if (E->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects) &&
+ Expr::EvalResult Result;
+ if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
!S.SourceMgr.isInSystemMacro(CC)) {
+ llvm::APSInt Value = Result.Val.getInt();
if (isSameWidthConstantConversion(S, E, T, CC)) {
std::string PrettySourceValue = Value.toString(10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
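// Illustrative sketch of the new width-increasing branch: negating an unsigned
// value and then widening it either zero-fills the high-order bits (unsigned
// target) or produces a value that can never be negative (signed target).
// Assumes the usual case of 'unsigned long long' being wider than 'unsigned int'.
unsigned long long widen_negated(unsigned int U) {
  return -U;  // for nonzero U, -U wraps to a large unsigned int; the widening
              // then fills the extra high-order bits with zeroes
}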
@@ -10954,11 +11189,13 @@ static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
E->getType(), CC, &Suspicious);
}
-/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
+/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
if (S.getLangOpts().Bool)
return;
+ if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
+ return;
CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}
@@ -11003,8 +11240,10 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
}
// Skip past explicit casts.
- if (isa<ExplicitCastExpr>(E)) {
- E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
+ if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
+ E = CE->getSubExpr()->IgnoreParenImpCasts();
+ if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
+ S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
return AnalyzeImplicitConversions(S, E, CC);
}
@@ -11057,9 +11296,15 @@ static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE,
::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
}
- if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E))
- if (U->getOpcode() == UO_LNot)
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
+ if (U->getOpcode() == UO_LNot) {
::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
+ } else if (U->getOpcode() != UO_AddrOf) {
+ if (U->getSubExpr()->getType()->isAtomicType())
+ S.Diag(U->getSubExpr()->getBeginLoc(),
+ diag::warn_atomic_implicit_seq_cst);
+ }
+ }
}
/// Diagnose integer type and any valid implicit conversion to it.
@@ -11067,13 +11312,13 @@ static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
// Taking into account implicit conversions,
// allow any integer.
if (!E->getType()->isIntegerType()) {
- S.Diag(E->getLocStart(),
+ S.Diag(E->getBeginLoc(),
diag::err_opencl_enqueue_kernel_invalid_local_size_type);
return true;
}
// Potentially emit standard warnings for implicit conversions if enabled
// using -Wconversion.
- CheckImplicitConversion(S, E, IntT, E->getLocStart());
+ CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
return false;
}
@@ -11281,7 +11526,7 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
// Suggest '&' to silence the function warning.
Diag(E->getExprLoc(), diag::note_function_warning_silence)
- << FixItHint::CreateInsertion(E->getLocStart(), "&");
+ << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
// Check to see if '()' fixit should be emitted.
QualType ReturnType;
@@ -11310,7 +11555,7 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
return;
}
Diag(E->getExprLoc(), diag::note_function_to_function_call)
- << FixItHint::CreateInsertion(getLocForEndOfToken(E->getLocEnd()), "()");
+ << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}
/// Diagnoses "dangerous" implicit conversions within the given
@@ -11663,30 +11908,42 @@ public:
notePostUse(O, E);
}
- void VisitBinComma(BinaryOperator *BO) {
- // C++11 [expr.comma]p1:
- // Every value computation and side effect associated with the left
- // expression is sequenced before every value computation and side
- // effect associated with the right expression.
- SequenceTree::Seq LHS = Tree.allocate(Region);
- SequenceTree::Seq RHS = Tree.allocate(Region);
+ void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) {
+ SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
+ SequenceTree::Seq AfterRegion = Tree.allocate(Region);
SequenceTree::Seq OldRegion = Region;
{
- SequencedSubexpression SeqLHS(*this);
- Region = LHS;
- Visit(BO->getLHS());
+ SequencedSubexpression SeqBefore(*this);
+ Region = BeforeRegion;
+ Visit(SequencedBefore);
}
- Region = RHS;
- Visit(BO->getRHS());
+ Region = AfterRegion;
+ Visit(SequencedAfter);
Region = OldRegion;
- // Forget that LHS and RHS are sequenced. They are both unsequenced
- // with respect to other stuff.
- Tree.merge(LHS);
- Tree.merge(RHS);
+ Tree.merge(BeforeRegion);
+ Tree.merge(AfterRegion);
+ }
+
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) {
+ // C++17 [expr.sub]p1:
+ // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
+ // expression E1 is sequenced before the expression E2.
+ if (SemaRef.getLangOpts().CPlusPlus17)
+ VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
+ else
+ Base::VisitStmt(ASE);
+ }
+
+ void VisitBinComma(BinaryOperator *BO) {
+ // C++11 [expr.comma]p1:
+ // Every value computation and side effect associated with the left
+ // expression is sequenced before every value computation and side
+ // effect associated with the right expression.
+ VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
}
void VisitBinAssign(BinaryOperator *BO) {
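// Illustrative sketch, assuming -std=c++17 and -Wunsequenced: per the quoted
// C++17 [expr.sub]p1 wording, the base of a subscript is now sequenced before
// the index, so a contrived expression like the one below stops being an
// unsequenced modification/use of 'I'.
int sequenced_subscript() {
  int A[4] = {1, 2, 3, 4};
  int I = 0;
  return (I++, A)[I];  // I++ (in the base) is sequenced before the read of I
}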
@@ -11992,6 +12249,18 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
if (!Param->getType().isConstQualified())
Diag(Param->getLocation(), diag::err_attribute_pointers_only)
<< Attr->getSpelling() << 1;
+
+ // Check for parameter names shadowing fields from the class.
+ if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
+ // The owning context for the parameter should be the function, but we
+ // want to see if this function's declaration context is a record.
+ DeclContext *DC = Param->getDeclContext();
+ if (DC && DC->isFunctionOrMethod()) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
+ CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
+ RD, /*DeclIsField*/ false);
+ }
+ }
}
return HasInvalidParm;
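// Illustrative sketch of what the added CheckShadowInheritedFields call now
// diagnoses: a parameter of a member function definition whose name hides a
// field inherited from a base class (the governing warning group is not shown
// in this hunk).
struct ShadowBase {
  int Value;
};
struct ShadowDerived : ShadowBase {
  void setValue(int Value) { this->Value = Value; }  // 'Value' shadows ShadowBase::Value
};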
@@ -12120,13 +12389,18 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
BaseExpr->getType()->getPointeeOrArrayElementType();
BaseExpr = BaseExpr->IgnoreParenCasts();
const ConstantArrayType *ArrayTy =
- Context.getAsConstantArrayType(BaseExpr->getType());
+ Context.getAsConstantArrayType(BaseExpr->getType());
+
if (!ArrayTy)
return;
- llvm::APSInt index;
- if (!IndexExpr->EvaluateAsInt(index, Context, Expr::SE_AllowSideEffects))
+ const Type *BaseType = ArrayTy->getElementType().getTypePtr();
+
+ Expr::EvalResult Result;
+ if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
return;
+
+ llvm::APSInt index = Result.Val.getInt();
if (IndexNegated)
index = -index;
@@ -12137,11 +12411,19 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
ND = ME->getMemberDecl();
if (index.isUnsigned() || !index.isNegative()) {
+ // It is possible that the type of the base expression after
+ // IgnoreParenCasts is incomplete, even though the type of the base
+ // expression before IgnoreParenCasts is complete (see PR39746 for an
+ // example). In this case we have no information about whether the array
+ // access exceeds the array bounds. However we can still diagnose an array
+ // access which precedes the array bounds.
+ if (BaseType->isIncompleteType())
+ return;
+
llvm::APInt size = ArrayTy->getSize();
if (!size.isStrictlyPositive())
return;
- const Type *BaseType = BaseExpr->getType()->getPointeeOrArrayElementType();
if (BaseType != EffectiveType) {
// Make sure we're comparing apples to apples when comparing index to size
uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
@@ -12183,8 +12465,8 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
ASE->getRBracketLoc());
if (SourceMgr.isInSystemHeader(RBracketLoc)) {
- SourceLocation IndexLoc = SourceMgr.getSpellingLoc(
- IndexExpr->getLocStart());
+ SourceLocation IndexLoc =
+ SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
return;
}
@@ -12194,11 +12476,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (ASE)
DiagID = diag::warn_array_index_exceeds_bounds;
- DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
PDiag(DiagID) << index.toString(10, true)
- << size.toString(10, true)
- << (unsigned)size.getLimitedValue(~0U)
- << IndexExpr->getSourceRange());
+ << size.toString(10, true)
+ << (unsigned)size.getLimitedValue(~0U)
+ << IndexExpr->getSourceRange());
} else {
unsigned DiagID = diag::warn_array_index_precedes_bounds;
if (!ASE) {
@@ -12206,9 +12488,9 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (index.isNegative()) index = -index;
}
- DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
PDiag(DiagID) << index.toString(10, true)
- << IndexExpr->getSourceRange());
+ << IndexExpr->getSourceRange());
}
if (!ND) {
@@ -12223,9 +12505,9 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
}
if (ND)
- DiagRuntimeBehavior(ND->getLocStart(), BaseExpr,
+ DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
PDiag(diag::note_array_index_out_of_bounds)
- << ND->getDeclName());
+ << ND->getDeclName());
}
void Sema::CheckArrayAccess(const Expr *expr) {
@@ -12965,15 +13247,13 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
if (!ProbableTypo) {
bool BodyColInvalid;
unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
- PossibleBody->getLocStart(),
- &BodyColInvalid);
+ PossibleBody->getBeginLoc(), &BodyColInvalid);
if (BodyColInvalid)
return;
bool StmtColInvalid;
- unsigned StmtCol = SourceMgr.getPresumedColumnNumber(
- S->getLocStart(),
- &StmtColInvalid);
+ unsigned StmtCol =
+ SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
if (StmtColInvalid)
return;
@@ -13493,7 +13773,7 @@ void Sema::DiagnoseMisalignedMembers() {
if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
ND = TD;
}
- Diag(m.E->getLocStart(), diag::warn_taking_address_of_packed_member)
+ Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
<< m.MD << ND << m.E->getSourceRange();
}
MisalignedMembers.clear();
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 30af826ef6cc..d9f007a46da5 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -10,6 +10,8 @@
// This file defines the code-completion semantic actions.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
@@ -32,6 +34,8 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Path.h"
#include <list>
#include <map>
#include <vector>
@@ -40,315 +44,309 @@ using namespace clang;
using namespace sema;
namespace {
- /// A container of code-completion results.
- class ResultBuilder {
- public:
- /// The type of a name-lookup filter, which can be provided to the
- /// name-lookup routines to specify which declarations should be included in
- /// the result set (when it returns true) and which declarations should be
- /// filtered out (returns false).
- typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
-
- typedef CodeCompletionResult Result;
+/// A container of code-completion results.
+class ResultBuilder {
+public:
+ /// The type of a name-lookup filter, which can be provided to the
+ /// name-lookup routines to specify which declarations should be included in
+ /// the result set (when it returns true) and which declarations should be
+ /// filtered out (returns false).
+ typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
- private:
- /// The actual results we have found.
- std::vector<Result> Results;
+ typedef CodeCompletionResult Result;
- /// A record of all of the declarations we have found and placed
- /// into the result set, used to ensure that no declaration ever gets into
- /// the result set twice.
- llvm::SmallPtrSet<const Decl*, 16> AllDeclsFound;
+private:
+ /// The actual results we have found.
+ std::vector<Result> Results;
- typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
+ /// A record of all of the declarations we have found and placed
+ /// into the result set, used to ensure that no declaration ever gets into
+ /// the result set twice.
+ llvm::SmallPtrSet<const Decl *, 16> AllDeclsFound;
- /// An entry in the shadow map, which is optimized to store
- /// a single (declaration, index) mapping (the common case) but
- /// can also store a list of (declaration, index) mappings.
- class ShadowMapEntry {
- typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
+ typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
- /// Contains either the solitary NamedDecl * or a vector
- /// of (declaration, index) pairs.
- llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector*> DeclOrVector;
+ /// An entry in the shadow map, which is optimized to store
+ /// a single (declaration, index) mapping (the common case) but
+ /// can also store a list of (declaration, index) mappings.
+ class ShadowMapEntry {
+ typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
- /// When the entry contains a single declaration, this is
- /// the index associated with that entry.
- unsigned SingleDeclIndex;
+ /// Contains either the solitary NamedDecl * or a vector
+ /// of (declaration, index) pairs.
+ llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector *> DeclOrVector;
- public:
- ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) { }
+ /// When the entry contains a single declaration, this is
+ /// the index associated with that entry.
+ unsigned SingleDeclIndex;
- void Add(const NamedDecl *ND, unsigned Index) {
- if (DeclOrVector.isNull()) {
- // 0 - > 1 elements: just set the single element information.
- DeclOrVector = ND;
- SingleDeclIndex = Index;
- return;
- }
+ public:
+ ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) {}
- if (const NamedDecl *PrevND =
- DeclOrVector.dyn_cast<const NamedDecl *>()) {
- // 1 -> 2 elements: create the vector of results and push in the
- // existing declaration.
- DeclIndexPairVector *Vec = new DeclIndexPairVector;
- Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
- DeclOrVector = Vec;
- }
+ void Add(const NamedDecl *ND, unsigned Index) {
+ if (DeclOrVector.isNull()) {
+ // 0 -> 1 elements: just set the single element information.
+ DeclOrVector = ND;
+ SingleDeclIndex = Index;
+ return;
+ }
- // Add the new element to the end of the vector.
- DeclOrVector.get<DeclIndexPairVector*>()->push_back(
- DeclIndexPair(ND, Index));
+ if (const NamedDecl *PrevND =
+ DeclOrVector.dyn_cast<const NamedDecl *>()) {
+ // 1 -> 2 elements: create the vector of results and push in the
+ // existing declaration.
+ DeclIndexPairVector *Vec = new DeclIndexPairVector;
+ Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
+ DeclOrVector = Vec;
}
- void Destroy() {
- if (DeclIndexPairVector *Vec
- = DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
- delete Vec;
- DeclOrVector = ((NamedDecl *)nullptr);
- }
+ // Add the new element to the end of the vector.
+ DeclOrVector.get<DeclIndexPairVector *>()->push_back(
+ DeclIndexPair(ND, Index));
+ }
+
+ void Destroy() {
+ if (DeclIndexPairVector *Vec =
+ DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
+ delete Vec;
+ DeclOrVector = ((NamedDecl *)nullptr);
}
+ }
- // Iteration.
- class iterator;
- iterator begin() const;
- iterator end() const;
- };
+ // Iteration.
+ class iterator;
+ iterator begin() const;
+ iterator end() const;
+ };
- /// A mapping from declaration names to the declarations that have
- /// this name within a particular scope and their index within the list of
- /// results.
- typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+ /// A mapping from declaration names to the declarations that have
+ /// this name within a particular scope and their index within the list of
+ /// results.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
- /// The semantic analysis object for which results are being
- /// produced.
- Sema &SemaRef;
+ /// The semantic analysis object for which results are being
+ /// produced.
+ Sema &SemaRef;
- /// The allocator used to allocate new code-completion strings.
- CodeCompletionAllocator &Allocator;
+ /// The allocator used to allocate new code-completion strings.
+ CodeCompletionAllocator &Allocator;
- CodeCompletionTUInfo &CCTUInfo;
+ CodeCompletionTUInfo &CCTUInfo;
- /// If non-NULL, a filter function used to remove any code-completion
- /// results that are not desirable.
- LookupFilter Filter;
+ /// If non-NULL, a filter function used to remove any code-completion
+ /// results that are not desirable.
+ LookupFilter Filter;
- /// Whether we should allow declarations as
- /// nested-name-specifiers that would otherwise be filtered out.
- bool AllowNestedNameSpecifiers;
+ /// Whether we should allow declarations as
+ /// nested-name-specifiers that would otherwise be filtered out.
+ bool AllowNestedNameSpecifiers;
- /// If set, the type that we would prefer our resulting value
- /// declarations to have.
- ///
- /// Closely matching the preferred type gives a boost to a result's
- /// priority.
- CanQualType PreferredType;
+ /// If set, the type that we would prefer our resulting value
+ /// declarations to have.
+ ///
+ /// Closely matching the preferred type gives a boost to a result's
+ /// priority.
+ CanQualType PreferredType;
- /// A list of shadow maps, which is used to model name hiding at
- /// different levels of, e.g., the inheritance hierarchy.
- std::list<ShadowMap> ShadowMaps;
+ /// A list of shadow maps, which is used to model name hiding at
+ /// different levels of, e.g., the inheritance hierarchy.
+ std::list<ShadowMap> ShadowMaps;
- /// If we're potentially referring to a C++ member function, the set
- /// of qualifiers applied to the object type.
- Qualifiers ObjectTypeQualifiers;
+ /// If we're potentially referring to a C++ member function, the set
+ /// of qualifiers applied to the object type.
+ Qualifiers ObjectTypeQualifiers;
- /// Whether the \p ObjectTypeQualifiers field is active.
- bool HasObjectTypeQualifiers;
+ /// Whether the \p ObjectTypeQualifiers field is active.
+ bool HasObjectTypeQualifiers;
- /// The selector that we prefer.
- Selector PreferredSelector;
+ /// The selector that we prefer.
+ Selector PreferredSelector;
- /// The completion context in which we are gathering results.
- CodeCompletionContext CompletionContext;
+ /// The completion context in which we are gathering results.
+ CodeCompletionContext CompletionContext;
- /// If we are in an instance method definition, the \@implementation
- /// object.
- ObjCImplementationDecl *ObjCImplementation;
+ /// If we are in an instance method definition, the \@implementation
+ /// object.
+ ObjCImplementationDecl *ObjCImplementation;
- void AdjustResultPriorityForDecl(Result &R);
+ void AdjustResultPriorityForDecl(Result &R);
- void MaybeAddConstructorResults(Result R);
+ void MaybeAddConstructorResults(Result R);
- public:
- explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- const CodeCompletionContext &CompletionContext,
- LookupFilter Filter = nullptr)
+public:
+ explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ const CodeCompletionContext &CompletionContext,
+ LookupFilter Filter = nullptr)
: SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
- Filter(Filter),
- AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
- CompletionContext(CompletionContext),
- ObjCImplementation(nullptr)
- {
- // If this is an Objective-C instance method definition, dig out the
- // corresponding implementation.
- switch (CompletionContext.getKind()) {
- case CodeCompletionContext::CCC_Expression:
- case CodeCompletionContext::CCC_ObjCMessageReceiver:
- case CodeCompletionContext::CCC_ParenthesizedExpression:
- case CodeCompletionContext::CCC_Statement:
- case CodeCompletionContext::CCC_Recovery:
- if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
- if (Method->isInstanceMethod())
- if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
- ObjCImplementation = Interface->getImplementation();
- break;
+ Filter(Filter), AllowNestedNameSpecifiers(false),
+ HasObjectTypeQualifiers(false), CompletionContext(CompletionContext),
+ ObjCImplementation(nullptr) {
+ // If this is an Objective-C instance method definition, dig out the
+ // corresponding implementation.
+ switch (CompletionContext.getKind()) {
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Recovery:
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->isInstanceMethod())
+ if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
+ ObjCImplementation = Interface->getImplementation();
+ break;
- default:
- break;
- }
- }
-
- /// Determine the priority for a reference to the given declaration.
- unsigned getBasePriority(const NamedDecl *D);
-
- /// Whether we should include code patterns in the completion
- /// results.
- bool includeCodePatterns() const {
- return SemaRef.CodeCompleter &&
- SemaRef.CodeCompleter->includeCodePatterns();
- }
-
- /// Set the filter used for code-completion results.
- void setFilter(LookupFilter Filter) {
- this->Filter = Filter;
- }
-
- Result *data() { return Results.empty()? nullptr : &Results.front(); }
- unsigned size() const { return Results.size(); }
- bool empty() const { return Results.empty(); }
-
- /// Specify the preferred type.
- void setPreferredType(QualType T) {
- PreferredType = SemaRef.Context.getCanonicalType(T);
+ default:
+ break;
}
+ }
- /// Set the cv-qualifiers on the object type, for us in filtering
- /// calls to member functions.
- ///
- /// When there are qualifiers in this set, they will be used to filter
- /// out member functions that aren't available (because there will be a
- /// cv-qualifier mismatch) or prefer functions with an exact qualifier
- /// match.
- void setObjectTypeQualifiers(Qualifiers Quals) {
- ObjectTypeQualifiers = Quals;
- HasObjectTypeQualifiers = true;
- }
-
- /// Set the preferred selector.
- ///
- /// When an Objective-C method declaration result is added, and that
- /// method's selector matches this preferred selector, we give that method
- /// a slight priority boost.
- void setPreferredSelector(Selector Sel) {
- PreferredSelector = Sel;
- }
-
- /// Retrieve the code-completion context for which results are
- /// being collected.
- const CodeCompletionContext &getCompletionContext() const {
- return CompletionContext;
- }
-
- /// Specify whether nested-name-specifiers are allowed.
- void allowNestedNameSpecifiers(bool Allow = true) {
- AllowNestedNameSpecifiers = Allow;
- }
-
- /// Return the semantic analysis object for which we are collecting
- /// code completion results.
- Sema &getSema() const { return SemaRef; }
-
- /// Retrieve the allocator used to allocate code completion strings.
- CodeCompletionAllocator &getAllocator() const { return Allocator; }
-
- CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
-
- /// Determine whether the given declaration is at all interesting
- /// as a code-completion result.
- ///
- /// \param ND the declaration that we are inspecting.
- ///
- /// \param AsNestedNameSpecifier will be set true if this declaration is
- /// only interesting when it is a nested-name-specifier.
- bool isInterestingDecl(const NamedDecl *ND,
- bool &AsNestedNameSpecifier) const;
-
- /// Check whether the result is hidden by the Hiding declaration.
- ///
- /// \returns true if the result is hidden and cannot be found, false if
- /// the hidden result could still be found. When false, \p R may be
- /// modified to describe how the result can be found (e.g., via extra
- /// qualification).
- bool CheckHiddenResult(Result &R, DeclContext *CurContext,
- const NamedDecl *Hiding);
-
- /// Add a new result to this result set (if it isn't already in one
- /// of the shadow maps), or replace an existing result (for, e.g., a
- /// redeclaration).
- ///
- /// \param R the result to add (if it is unique).
- ///
- /// \param CurContext the context in which this result will be named.
- void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
-
- /// Add a new result to this result set, where we already know
- /// the hiding declaration (if any).
- ///
- /// \param R the result to add (if it is unique).
- ///
- /// \param CurContext the context in which this result will be named.
- ///
- /// \param Hiding the declaration that hides the result.
- ///
- /// \param InBaseClass whether the result was found in a base
- /// class of the searched context.
- void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
- bool InBaseClass);
-
- /// Add a new non-declaration result to this result set.
- void AddResult(Result R);
-
- /// Enter into a new scope.
- void EnterNewScope();
-
- /// Exit from the current scope.
- void ExitScope();
-
- /// Ignore this declaration, if it is seen again.
- void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
-
- /// Add a visited context.
- void addVisitedContext(DeclContext *Ctx) {
- CompletionContext.addVisitedContext(Ctx);
- }
-
- /// \name Name lookup predicates
- ///
- /// These predicates can be passed to the name lookup functions to filter the
- /// results of name lookup. All of the predicates have the same type, so that
- ///
- //@{
- bool IsOrdinaryName(const NamedDecl *ND) const;
- bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
- bool IsIntegralConstantValue(const NamedDecl *ND) const;
- bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
- bool IsNestedNameSpecifier(const NamedDecl *ND) const;
- bool IsEnum(const NamedDecl *ND) const;
- bool IsClassOrStruct(const NamedDecl *ND) const;
- bool IsUnion(const NamedDecl *ND) const;
- bool IsNamespace(const NamedDecl *ND) const;
- bool IsNamespaceOrAlias(const NamedDecl *ND) const;
- bool IsType(const NamedDecl *ND) const;
- bool IsMember(const NamedDecl *ND) const;
- bool IsObjCIvar(const NamedDecl *ND) const;
- bool IsObjCMessageReceiver(const NamedDecl *ND) const;
- bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
- bool IsObjCCollection(const NamedDecl *ND) const;
- bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
- //@}
- };
-}
+ /// Determine the priority for a reference to the given declaration.
+ unsigned getBasePriority(const NamedDecl *D);
+
+ /// Whether we should include code patterns in the completion
+ /// results.
+ bool includeCodePatterns() const {
+ return SemaRef.CodeCompleter &&
+ SemaRef.CodeCompleter->includeCodePatterns();
+ }
+
+ /// Set the filter used for code-completion results.
+ void setFilter(LookupFilter Filter) { this->Filter = Filter; }
+
+ Result *data() { return Results.empty() ? nullptr : &Results.front(); }
+ unsigned size() const { return Results.size(); }
+ bool empty() const { return Results.empty(); }
+
+ /// Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
+ }
+
+ /// Set the cv-qualifiers on the object type, for use in filtering
+ /// calls to member functions.
+ ///
+ /// When there are qualifiers in this set, they will be used to filter
+ /// out member functions that aren't available (because there will be a
+ /// cv-qualifier mismatch) or prefer functions with an exact qualifier
+ /// match.
+ void setObjectTypeQualifiers(Qualifiers Quals) {
+ ObjectTypeQualifiers = Quals;
+ HasObjectTypeQualifiers = true;
+ }
+
+ /// Set the preferred selector.
+ ///
+ /// When an Objective-C method declaration result is added, and that
+ /// method's selector matches this preferred selector, we give that method
+ /// a slight priority boost.
+ void setPreferredSelector(Selector Sel) { PreferredSelector = Sel; }
+
+ /// Retrieve the code-completion context for which results are
+ /// being collected.
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
+ }
+
+ /// Specify whether nested-name-specifiers are allowed.
+ void allowNestedNameSpecifiers(bool Allow = true) {
+ AllowNestedNameSpecifiers = Allow;
+ }
+
+ /// Return the semantic analysis object for which we are collecting
+ /// code completion results.
+ Sema &getSema() const { return SemaRef; }
+
+ /// Retrieve the allocator used to allocate code completion strings.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// Determine whether the given declaration is at all interesting
+ /// as a code-completion result.
+ ///
+ /// \param ND the declaration that we are inspecting.
+ ///
+ /// \param AsNestedNameSpecifier will be set true if this declaration is
+ /// only interesting when it is a nested-name-specifier.
+ bool isInterestingDecl(const NamedDecl *ND,
+ bool &AsNestedNameSpecifier) const;
+
+ /// Check whether the result is hidden by the Hiding declaration.
+ ///
+ /// \returns true if the result is hidden and cannot be found, false if
+ /// the hidden result could still be found. When false, \p R may be
+ /// modified to describe how the result can be found (e.g., via extra
+ /// qualification).
+ bool CheckHiddenResult(Result &R, DeclContext *CurContext,
+ const NamedDecl *Hiding);
+
+ /// Add a new result to this result set (if it isn't already in one
+ /// of the shadow maps), or replace an existing result (for, e.g., a
+ /// redeclaration).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
+
+ /// Add a new result to this result set, where we already know
+ /// the hiding declaration (if any).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ ///
+ /// \param Hiding the declaration that hides the result.
+ ///
+ /// \param InBaseClass whether the result was found in a base
+ /// class of the searched context.
+ void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
+ bool InBaseClass);
+
+ /// Add a new non-declaration result to this result set.
+ void AddResult(Result R);
+
+ /// Enter into a new scope.
+ void EnterNewScope();
+
+ /// Exit from the current scope.
+ void ExitScope();
+
+ /// Ignore this declaration, if it is seen again.
+ void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
+
+ /// Add a visited context.
+ void addVisitedContext(DeclContext *Ctx) {
+ CompletionContext.addVisitedContext(Ctx);
+ }
+
+ /// \name Name lookup predicates
+ ///
+ /// These predicates can be passed to the name lookup functions to filter the
+ /// results of name lookup. All of the predicates have the same type, so that
+ /// they can be stored and passed around interchangeably.
+ ///
+ //@{
+ bool IsOrdinaryName(const NamedDecl *ND) const;
+ bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
+ bool IsIntegralConstantValue(const NamedDecl *ND) const;
+ bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
+ bool IsNestedNameSpecifier(const NamedDecl *ND) const;
+ bool IsEnum(const NamedDecl *ND) const;
+ bool IsClassOrStruct(const NamedDecl *ND) const;
+ bool IsUnion(const NamedDecl *ND) const;
+ bool IsNamespace(const NamedDecl *ND) const;
+ bool IsNamespaceOrAlias(const NamedDecl *ND) const;
+ bool IsType(const NamedDecl *ND) const;
+ bool IsMember(const NamedDecl *ND) const;
+ bool IsObjCIvar(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiver(const NamedDecl *ND) const;
+ bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
+ bool IsObjCCollection(const NamedDecl *ND) const;
+ bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
+ //@}
+};
+} // namespace
class ResultBuilder::ShadowMapEntry::iterator {
llvm::PointerUnion<const NamedDecl *, const DeclIndexPair *> DeclOrIterator;
@@ -364,20 +362,18 @@ public:
DeclIndexPair Value;
public:
- pointer(const DeclIndexPair &Value) : Value(Value) { }
+ pointer(const DeclIndexPair &Value) : Value(Value) {}
- const DeclIndexPair *operator->() const {
- return &Value;
- }
+ const DeclIndexPair *operator->() const { return &Value; }
};
iterator() : DeclOrIterator((NamedDecl *)nullptr), SingleDeclIndex(0) {}
iterator(const NamedDecl *SingleDecl, unsigned Index)
- : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) { }
+ : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) {}
iterator(const DeclIndexPair *Iterator)
- : DeclOrIterator(Iterator), SingleDeclIndex(0) { }
+ : DeclOrIterator(Iterator), SingleDeclIndex(0) {}
iterator &operator++() {
if (DeclOrIterator.is<const NamedDecl *>()) {
@@ -386,7 +382,7 @@ public:
return *this;
}
- const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair*>();
+ const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair *>();
++I;
DeclOrIterator = I;
return *this;
@@ -402,17 +398,15 @@ public:
if (const NamedDecl *ND = DeclOrIterator.dyn_cast<const NamedDecl *>())
return reference(ND, SingleDeclIndex);
- return *DeclOrIterator.get<const DeclIndexPair*>();
+ return *DeclOrIterator.get<const DeclIndexPair *>();
}
- pointer operator->() const {
- return pointer(**this);
- }
+ pointer operator->() const { return pointer(**this); }
friend bool operator==(const iterator &X, const iterator &Y) {
- return X.DeclOrIterator.getOpaqueValue()
- == Y.DeclOrIterator.getOpaqueValue() &&
- X.SingleDeclIndex == Y.SingleDeclIndex;
+ return X.DeclOrIterator.getOpaqueValue() ==
+ Y.DeclOrIterator.getOpaqueValue() &&
+ X.SingleDeclIndex == Y.SingleDeclIndex;
}
friend bool operator!=(const iterator &X, const iterator &Y) {
@@ -453,8 +447,7 @@ ResultBuilder::ShadowMapEntry::end() const {
/// \returns a nested name specifier that refers into the target context, or
/// NULL if no qualification is needed.
static NestedNameSpecifier *
-getRequiredQualification(ASTContext &Context,
- const DeclContext *CurContext,
+getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
const DeclContext *TargetContext) {
SmallVector<const DeclContext *, 4> TargetParents;
@@ -472,16 +465,14 @@ getRequiredQualification(ASTContext &Context,
while (!TargetParents.empty()) {
const DeclContext *Parent = TargetParents.pop_back_val();
- if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
+ if (const auto *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
if (!Namespace->getIdentifier())
continue;
Result = NestedNameSpecifier::Create(Context, Result, Namespace);
- }
- else if (const TagDecl *TD = dyn_cast<TagDecl>(Parent))
- Result = NestedNameSpecifier::Create(Context, Result,
- false,
- Context.getTypeDeclType(TD).getTypePtr());
+ } else if (const auto *TD = dyn_cast<TagDecl>(Parent))
+ Result = NestedNameSpecifier::Create(
+ Context, Result, false, Context.getTypeDeclType(TD).getTypePtr());
}
return Result;
}
@@ -494,8 +485,8 @@ static bool isReservedName(const IdentifierInfo *Id,
return false;
const char *Name = Id->getNameStart();
return Name[0] == '_' &&
- (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z' &&
- !doubleUnderscoreOnly));
+ (Name[1] == '_' ||
+ (Name[1] >= 'A' && Name[1] <= 'Z' && !doubleUnderscoreOnly));
}
// Some declarations have reserved names that we don't want to ever show.
@@ -514,9 +505,9 @@ static bool shouldIgnoreDueToReservedName(const NamedDecl *ND, Sema &SemaRef) {
// This allows for system headers providing private symbols with a single
// underscore.
if (isReservedName(Id, /*doubleUnderscoreOnly=*/true) &&
- SemaRef.SourceMgr.isInSystemHeader(
- SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))
- return true;
+ SemaRef.SourceMgr.isInSystemHeader(
+ SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))
+ return true;
return false;
}
@@ -550,10 +541,8 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
return false;
if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
- (isa<NamespaceDecl>(ND) &&
- Filter != &ResultBuilder::IsNamespace &&
- Filter != &ResultBuilder::IsNamespaceOrAlias &&
- Filter != nullptr))
+ (isa<NamespaceDecl>(ND) && Filter != &ResultBuilder::IsNamespace &&
+ Filter != &ResultBuilder::IsNamespaceOrAlias && Filter != nullptr))
AsNestedNameSpecifier = true;
// Filter out any unwanted results.
@@ -597,8 +586,7 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
R.QualifierIsInformative = false;
if (!R.Qualifier)
- R.Qualifier = getRequiredQualification(SemaRef.Context,
- CurContext,
+ R.Qualifier = getRequiredQualification(SemaRef.Context, CurContext,
R.Declaration->getDeclContext());
return false;
}
@@ -609,23 +597,23 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
switch (T->getTypeClass()) {
case Type::Builtin:
switch (cast<BuiltinType>(T)->getKind()) {
- case BuiltinType::Void:
- return STC_Void;
+ case BuiltinType::Void:
+ return STC_Void;
- case BuiltinType::NullPtr:
- return STC_Pointer;
+ case BuiltinType::NullPtr:
+ return STC_Pointer;
- case BuiltinType::Overload:
- case BuiltinType::Dependent:
- return STC_Other;
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ return STC_Other;
- case BuiltinType::ObjCId:
- case BuiltinType::ObjCClass:
- case BuiltinType::ObjCSel:
- return STC_ObjectiveC;
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return STC_ObjectiveC;
- default:
- return STC_Arithmetic;
+ default:
+ return STC_Arithmetic;
}
case Type::Complex:
@@ -677,21 +665,21 @@ SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
ND = ND->getUnderlyingDecl();
- if (const TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ if (const auto *Type = dyn_cast<TypeDecl>(ND))
return C.getTypeDeclType(Type);
- if (const ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
+ if (const auto *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
return C.getObjCInterfaceType(Iface);
QualType T;
if (const FunctionDecl *Function = ND->getAsFunction())
T = Function->getCallResultType();
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND))
T = Method->getSendResultType();
- else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND))
T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
- else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND))
T = Property->getType();
- else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ else if (const auto *Value = dyn_cast<ValueDecl>(ND))
T = Value->getType();
else
return QualType();
@@ -700,12 +688,12 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
// get down to the likely type of an expression when the entity is
// used.
do {
- if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
+ if (const auto *Ref = T->getAs<ReferenceType>()) {
T = Ref->getPointeeType();
continue;
}
- if (const PointerType *Pointer = T->getAs<PointerType>()) {
+ if (const auto *Pointer = T->getAs<PointerType>()) {
if (Pointer->getPointeeType()->isFunctionType()) {
T = Pointer->getPointeeType();
continue;
@@ -714,12 +702,12 @@ QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
break;
}
- if (const BlockPointerType *Block = T->getAs<BlockPointerType>()) {
+ if (const auto *Block = T->getAs<BlockPointerType>()) {
T = Block->getPointeeType();
continue;
}
- if (const FunctionType *Function = T->getAs<FunctionType>()) {
+ if (const auto *Function = T->getAs<FunctionType>()) {
T = Function->getReturnType();
continue;
}
@@ -738,8 +726,7 @@ unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
const DeclContext *LexicalDC = ND->getLexicalDeclContext();
if (LexicalDC->isFunctionOrMethod()) {
// _cmd is relatively rare
- if (const ImplicitParamDecl *ImplicitParam =
- dyn_cast<ImplicitParamDecl>(ND))
+ if (const auto *ImplicitParam = dyn_cast<ImplicitParamDecl>(ND))
if (ImplicitParam->getIdentifier() &&
ImplicitParam->getIdentifier()->isStr("_cmd"))
return CCP_ObjC_cmd;
@@ -770,10 +757,10 @@ unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
// likely that the user will want to write a type as other declarations.
if ((isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
!(CompletionContext.getKind() == CodeCompletionContext::CCC_Statement ||
- CompletionContext.getKind()
- == CodeCompletionContext::CCC_ObjCMessageReceiver ||
- CompletionContext.getKind()
- == CodeCompletionContext::CCC_ParenthesizedExpression))
+ CompletionContext.getKind() ==
+ CodeCompletionContext::CCC_ObjCMessageReceiver ||
+ CompletionContext.getKind() ==
+ CodeCompletionContext::CCC_ParenthesizedExpression))
return CCP_Type;
return CCP_Declaration;
@@ -783,7 +770,7 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
// If this is an Objective-C method declaration whose selector matches our
// preferred selector, give it a priority boost.
if (!PreferredSelector.isNull())
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
if (PreferredSelector == Method->getSelector())
R.Priority += CCD_SelectorMatch;
@@ -797,20 +784,28 @@ void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
R.Priority /= CCF_ExactTypeMatch;
// Check for nearly-matching types, based on classification of each.
- else if ((getSimplifiedTypeClass(PreferredType)
- == getSimplifiedTypeClass(TC)) &&
+ else if ((getSimplifiedTypeClass(PreferredType) ==
+ getSimplifiedTypeClass(TC)) &&
!(PreferredType->isEnumeralType() && TC->isEnumeralType()))
R.Priority /= CCF_SimilarTypeMatch;
}
}
}
+DeclContext::lookup_result getConstructors(ASTContext &Context,
+ const CXXRecordDecl *Record) {
+ QualType RecordTy = Context.getTypeDeclType(Record);
+ DeclarationName ConstructorName =
+ Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(RecordTy));
+ return Record->lookup(ConstructorName);
+}
+
void ResultBuilder::MaybeAddConstructorResults(Result R) {
if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
!CompletionContext.wantConstructorResults())
return;
- ASTContext &Context = SemaRef.Context;
const NamedDecl *D = R.Declaration;
const CXXRecordDecl *Record = nullptr;
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(D))
@@ -828,16 +823,8 @@ void ResultBuilder::MaybeAddConstructorResults(Result R) {
if (!Record)
return;
-
- QualType RecordTy = Context.getTypeDeclType(Record);
- DeclarationName ConstructorName
- = Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(RecordTy));
- DeclContext::lookup_result Ctors = Record->lookup(ConstructorName);
- for (DeclContext::lookup_iterator I = Ctors.begin(),
- E = Ctors.end();
- I != E; ++I) {
- R.Declaration = *I;
+ for (NamedDecl *Ctor : getConstructors(SemaRef.Context, Record)) {
+ R.Declaration = Ctor;
R.CursorKind = getCursorKindForDecl(R.Declaration);
Results.push_back(R);
}
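// Illustrative sketch, not part of the change above: how the new
// getConstructors helper could be exercised on its own. The function name
// dumpConstructors and the parameters Ctx/Record are placeholders for this
// example; the lookup itself is exactly what the helper performs.
void dumpConstructors(ASTContext &Ctx, const CXXRecordDecl *Record) {
  // getConstructors builds a CXXConstructorName key from the canonical record
  // type and runs a member lookup, so the range below yields the class's
  // constructors (and constructor templates).
  for (NamedDecl *Ctor : getConstructors(Ctx, Record))
    Ctor->dump();
}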
@@ -919,8 +906,8 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
continue;
// Protocols are in distinct namespaces from everything else.
- if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
- || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol) ||
+ (IDNS & Decl::IDNS_ObjCProtocol)) &&
I->first->getIdentifierNamespace() != IDNS)
continue;
@@ -942,18 +929,19 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
} else
- AdjustResultPriorityForDecl(R);
+ AdjustResultPriorityForDecl(R);
// If this result is supposed to have an informative qualifier, add one.
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- Namespace);
+ R.Qualifier =
+ NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- false, SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ R.Qualifier = NestedNameSpecifier::Create(
+ SemaRef.Context, nullptr, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
else
R.QualifierIsInformative = false;
}
@@ -967,6 +955,11 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
MaybeAddConstructorResults(R);
}
+static void setInBaseClass(ResultBuilder::Result &R) {
+ R.Priority += CCD_InBaseClass;
+ R.InBaseClass = true;
+}
+
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
NamedDecl *Hiding, bool InBaseClass = false) {
if (R.Kind != Result::RK_Declaration) {
@@ -976,7 +969,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
}
// Look through using declarations.
- if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ if (const auto *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
R.Qualifier);
@@ -1015,27 +1008,27 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
const DeclContext *Ctx = R.Declaration->getDeclContext();
- if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
- Namespace);
- else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
- R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr, false,
- SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ if (const auto *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier =
+ NestedNameSpecifier::Create(SemaRef.Context, nullptr, Namespace);
+ else if (const auto *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(
+ SemaRef.Context, nullptr, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
else
R.QualifierIsInformative = false;
}
// Adjust the priority if this result comes from a base class.
if (InBaseClass)
- R.Priority += CCD_InBaseClass;
+ setInBaseClass(R);
AdjustResultPriorityForDecl(R);
if (HasObjectTypeQualifiers)
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
if (Method->isInstance()) {
- Qualifiers MethodQuals
- = Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ Qualifiers MethodQuals = Method->getTypeQualifiers();
if (ObjectTypeQualifiers == MethodQuals)
R.Priority += CCD_ObjectQualifierMatch;
else if (ObjectTypeQualifiers - MethodQuals) {
@@ -1054,7 +1047,7 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
void ResultBuilder::AddResult(Result R) {
assert(R.Kind != Result::RK_Declaration &&
- "Declaration results need more context");
+ "Declaration results need more context");
Results.push_back(R);
}
@@ -1064,9 +1057,8 @@ void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
/// Exit from the current scope.
void ResultBuilder::ExitScope() {
for (ShadowMap::iterator E = ShadowMaps.back().begin(),
- EEnd = ShadowMaps.back().end();
- E != EEnd;
- ++E)
+ EEnd = ShadowMaps.back().end();
+ E != EEnd; ++E)
E->second.Destroy();
ShadowMaps.pop_back();
@@ -1082,7 +1074,7 @@ bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOpts().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1107,7 +1099,7 @@ bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
- else if (SemaRef.getLangOpts().ObjC1) {
+ else if (SemaRef.getLangOpts().ObjC) {
if (isa<ObjCIvarDecl>(ND))
return true;
}
@@ -1119,7 +1111,7 @@ bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
if (!IsOrdinaryNonTypeName(ND))
return 0;
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
+ if (const auto *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
if (VD->getType()->isIntegralOrEnumerationType())
return true;
@@ -1135,16 +1127,15 @@ bool ResultBuilder::IsOrdinaryNonValueName(const NamedDecl *ND) const {
if (SemaRef.getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
- return (ND->getIdentifierNamespace() & IDNS) &&
- !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
- !isa<ObjCPropertyDecl>(ND);
+ return (ND->getIdentifierNamespace() & IDNS) && !isa<ValueDecl>(ND) &&
+ !isa<FunctionTemplateDecl>(ND) && !isa<ObjCPropertyDecl>(ND);
}
/// Determines whether the given declaration is suitable as the
/// start of a C++ nested-name-specifier, e.g., a class or namespace.
bool ResultBuilder::IsNestedNameSpecifier(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
return SemaRef.isAcceptableNestedNameSpecifier(ND);
@@ -1158,14 +1149,13 @@ bool ResultBuilder::IsEnum(const NamedDecl *ND) const {
/// Determines whether the given declaration is a class or struct.
bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
// For purposes of this check, interfaces match too.
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
- return RD->getTagKind() == TTK_Class ||
- RD->getTagKind() == TTK_Struct ||
- RD->getTagKind() == TTK_Interface;
+ if (const auto *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Class || RD->getTagKind() == TTK_Struct ||
+ RD->getTagKind() == TTK_Interface;
return false;
}
@@ -1173,10 +1163,10 @@ bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
/// Determines whether the given declaration is a union.
bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
// Allow us to find class templates, too.
- if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ if (const auto *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ if (const auto *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Union;
return false;
@@ -1250,11 +1240,12 @@ bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
return isObjCReceiverType(SemaRef.Context, T);
}
-bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const {
+bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(
+ const NamedDecl *ND) const {
if (IsObjCMessageReceiver(ND))
return true;
- const VarDecl *Var = dyn_cast<VarDecl>(ND);
+ const auto *Var = dyn_cast<VarDecl>(ND);
if (!Var)
return false;
@@ -1287,34 +1278,80 @@ bool ResultBuilder::IsObjCIvar(const NamedDecl *ND) const {
}
namespace {
- /// Visible declaration consumer that adds a code-completion result
- /// for each visible declaration.
- class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
- ResultBuilder &Results;
- DeclContext *CurContext;
- std::vector<FixItHint> FixIts;
- public:
- CodeCompletionDeclConsumer(
- ResultBuilder &Results, DeclContext *CurContext,
- std::vector<FixItHint> FixIts = std::vector<FixItHint>())
- : Results(Results), CurContext(CurContext), FixIts(std::move(FixIts)) {}
+/// Visible declaration consumer that adds a code-completion result
+/// for each visible declaration.
+class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
+ ResultBuilder &Results;
+ DeclContext *InitialLookupCtx;
+ // NamingClass and BaseType are used for access-checking. See
+ // Sema::IsSimplyAccessible for details.
+ CXXRecordDecl *NamingClass;
+ QualType BaseType;
+ std::vector<FixItHint> FixIts;
- void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
- bool InBaseClass) override {
- bool Accessible = true;
- if (Ctx)
- Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
- ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
- false, Accessible, FixIts);
- Results.AddResult(Result, CurContext, Hiding, InBaseClass);
+public:
+ CodeCompletionDeclConsumer(
+ ResultBuilder &Results, DeclContext *InitialLookupCtx,
+ QualType BaseType = QualType(),
+ std::vector<FixItHint> FixIts = std::vector<FixItHint>())
+ : Results(Results), InitialLookupCtx(InitialLookupCtx),
+ FixIts(std::move(FixIts)) {
+ NamingClass = llvm::dyn_cast<CXXRecordDecl>(InitialLookupCtx);
+ // If BaseType was not provided explicitly, emulate implicit 'this->'.
+ if (BaseType.isNull()) {
+ auto ThisType = Results.getSema().getCurrentThisType();
+ if (!ThisType.isNull()) {
+ assert(ThisType->isPointerType());
+ BaseType = ThisType->getPointeeType();
+ if (!NamingClass)
+ NamingClass = BaseType->getAsCXXRecordDecl();
+ }
}
-
- void EnteredContext(DeclContext* Ctx) override {
- Results.addVisitedContext(Ctx);
+ this->BaseType = BaseType;
+ }
+
+ void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass) override {
+ ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
+ false, IsAccessible(ND, Ctx), FixIts);
+ Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass);
+ }
+
+ void EnteredContext(DeclContext *Ctx) override {
+ Results.addVisitedContext(Ctx);
+ }
+
+private:
+ bool IsAccessible(NamedDecl *ND, DeclContext *Ctx) {
+ // Naming class to use for the access check. In most cases it was provided
+ // explicitly (e.g. member access (lhs.foo) or qualified lookup (X::));
+ // for unqualified lookup we fall back to the \p Ctx in which we found
+ // the member.
+ auto *NamingClass = this->NamingClass;
+ QualType BaseType = this->BaseType;
+ if (auto *Cls = llvm::dyn_cast_or_null<CXXRecordDecl>(Ctx)) {
+ if (!NamingClass)
+ NamingClass = Cls;
+ // When we emulate implicit 'this->' in an unqualified lookup, we might
+ // end up with an invalid naming class. In that case, we drop the emulated
+ // 'this->' qualifier to satisfy the preconditions of the access check.
+ if (NamingClass->getCanonicalDecl() != Cls->getCanonicalDecl() &&
+ !NamingClass->isDerivedFrom(Cls)) {
+ NamingClass = Cls;
+ BaseType = QualType();
+ }
+ } else {
+ // The decl was found outside the C++ class, so only ObjC access checks
+ // apply. Those do not rely on NamingClass and BaseType, so we clear them
+ // out.
+ NamingClass = nullptr;
+ BaseType = QualType();
}
- };
-}
+ return Results.getSema().IsSimplyAccessible(ND, NamingClass, BaseType);
+ }
+};
+} // namespace
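// Illustrative sketch, not part of the change above: the access-checking
// situation that NamingClass/BaseType model. The classes B and D and the
// functions below are made up for this example. Inside D::f the consumer
// emulates the implicit 'this->', so B's protected members count as
// accessible; from a free function only the public ones do.
class B {
protected:
  int Prot;
public:
  int Pub;
};
class D : public B {
  void f() {
    // unqualified completion here sees both Prot and Pub (implicit 'this->')
  }
};
void g(B &Obj) {
  // completing "Obj." here offers only Pub; Prot is inaccessible
}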
/// Add type specifiers for the current language as keyword results.
static void AddTypeSpecifierResults(const LangOptions &LangOpts,
@@ -1347,8 +1384,8 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.getCodeCompletionTUInfo());
if (LangOpts.CPlusPlus) {
// C++-specific
- Results.AddResult(Result("bool", CCP_Type +
- (LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
+ Results.AddResult(
+ Result("bool", CCP_Type + (LangOpts.ObjC ? CCD_bool_in_ObjC : 0)));
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
@@ -1464,14 +1501,11 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCImplementationResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt);
+ ResultBuilder &Results, bool NeedAt);
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
static void AddTypedefResult(ResultBuilder &Results) {
@@ -1509,7 +1543,7 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
return false;
case Sema::PCC_ForInit:
- return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
+ return LangOpts.CPlusPlus || LangOpts.ObjC || LangOpts.C99;
}
llvm_unreachable("Invalid ParserCompletionContext!");
@@ -1535,8 +1569,7 @@ static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
///
/// This routine provides a fast path where we provide constant strings for
/// common type names.
-static const char *GetCompletionTypeString(QualType T,
- ASTContext &Context,
+static const char *GetCompletionTypeString(QualType T, ASTContext &Context,
const PrintingPolicy &Policy,
CodeCompletionAllocator &Allocator) {
if (!T.getLocalQualifiers()) {
@@ -1549,11 +1582,16 @@ static const char *GetCompletionTypeString(QualType T,
if (TagDecl *Tag = TagT->getDecl())
if (!Tag->hasNameForLinkage()) {
switch (Tag->getTagKind()) {
- case TTK_Struct: return "struct <anonymous>";
- case TTK_Interface: return "__interface <anonymous>";
- case TTK_Class: return "class <anonymous>";
- case TTK_Union: return "union <anonymous>";
- case TTK_Enum: return "enum <anonymous>";
+ case TTK_Struct:
+ return "struct <anonymous>";
+ case TTK_Interface:
+ return "__interface <anonymous>";
+ case TTK_Class:
+ return "class <anonymous>";
+ case TTK_Union:
+ return "union <anonymous>";
+ case TTK_Enum:
+ return "enum <anonymous>";
}
}
}
@@ -1573,10 +1611,8 @@ static void addThisCompletion(Sema &S, ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
- Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
- S.Context,
- Policy,
- Allocator));
+ Builder.AddResultTypeChunk(
+ GetCompletionTypeString(ThisTy, S.Context, Policy, Allocator));
Builder.AddTypedTextChunk("this");
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
@@ -1596,11 +1632,76 @@ static void AddStaticAssertResult(CodeCompletionBuilder &Builder,
Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
+static void printOverrideString(llvm::raw_ostream &OS,
+ CodeCompletionString *CCS) {
+ for (const auto &C : *CCS) {
+ if (C.Kind == CodeCompletionString::CK_Optional)
+ printOverrideString(OS, C.Optional);
+ else
+ OS << C.Text;
+ // Add a space after return type.
+ if (C.Kind == CodeCompletionString::CK_ResultType)
+ OS << ' ';
+ }
+}
+
+static void AddOverrideResults(ResultBuilder &Results,
+ const CodeCompletionContext &CCContext,
+ CodeCompletionBuilder &Builder) {
+ Sema &S = Results.getSema();
+ const auto *CR = llvm::dyn_cast<CXXRecordDecl>(S.CurContext);
+ // If we are not inside a class/struct/union, there is nothing to add.
+ if (!CR)
+ return;
+ // First, collect the virtual methods of the current class, keyed by name
+ // so that the override check below can query them quickly.
+ llvm::StringMap<std::vector<FunctionDecl *>> Overrides;
+ for (auto *Method : CR->methods()) {
+ if (!Method->isVirtual() || !Method->getIdentifier())
+ continue;
+ Overrides[Method->getName()].push_back(Method);
+ }
+
+ for (const auto &Base : CR->bases()) {
+ const auto *BR = Base.getType().getTypePtr()->getAsCXXRecordDecl();
+ if (!BR)
+ continue;
+ for (auto *Method : BR->methods()) {
+ if (!Method->isVirtual() || !Method->getIdentifier())
+ continue;
+ const auto it = Overrides.find(Method->getName());
+ bool IsOverriden = false;
+ if (it != Overrides.end()) {
+ for (auto *MD : it->second) {
+ // If the method in the current class is not an overload of this
+ // virtual function, then it overrides this one.
+ if (!S.IsOverload(MD, Method, false)) {
+ IsOverriden = true;
+ break;
+ }
+ }
+ }
+ if (!IsOverriden) {
+ // Produce a CodeCompletionResult for this method, rendered as an
+ // override declaration whose final CodeCompletionString consists of a
+ // single TypedTextChunk.
+ std::string OverrideSignature;
+ llvm::raw_string_ostream OS(OverrideSignature);
+ CodeCompletionResult CCR(Method, 0);
+ PrintingPolicy Policy =
+ getCompletionPrintingPolicy(S.getASTContext(), S.getPreprocessor());
+ auto *CCS = CCR.createCodeCompletionStringForOverride(
+ S.getPreprocessor(), S.getASTContext(), Builder,
+ /*IncludeBriefComments=*/false, CCContext, Policy);
+ Results.AddResult(CodeCompletionResult(CCS, Method, CCP_CodePattern));
+ }
+ }
+ }
+}
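// Illustrative sketch, not part of the change above: the effect of
// AddOverrideResults on a made-up hierarchy. Inside Derived, completing a
// member declaration suggests "void run() override" but not stop(), because
// Derived already declares an overriding stop().
struct Base {
  virtual void run();
  virtual void stop();
};
struct Derived : Base {
  void stop() override;
  // completing a declaration here suggests: void run() override
};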
+
/// Add language constructs that show up for "ordinary" names.
-static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
- Scope *S,
- Sema &SemaRef,
- ResultBuilder &Results) {
+static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
+ Sema &SemaRef, ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
@@ -1649,10 +1750,12 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("declaration");
Results.AddResult(Result(Builder.TakeString()));
+ } else {
+ Results.AddResult(Result("template", CodeCompletionResult::RK_Keyword));
}
}
- if (SemaRef.getLangOpts().ObjC1)
+ if (SemaRef.getLangOpts().ObjC)
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
@@ -1704,6 +1807,12 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
if (IsNotInheritanceScope && Results.includeCodePatterns())
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
+
+ // FIXME: This adds override results only if we are at the first word
+ // of the declaration/definition. Also call this from other places to
+ // cover more use cases.
+ AddOverrideResults(Results, CodeCompletionContext::CCC_ClassStructUnion,
+ Builder);
}
}
LLVM_FALLTHROUGH;
@@ -1717,6 +1826,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddPlaceholderChunk("parameters");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Results.AddResult(Result(Builder.TakeString()));
+ } else {
+ Results.AddResult(Result("template", CodeCompletionResult::RK_Keyword));
}
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
@@ -1760,7 +1871,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
- if (SemaRef.getLangOpts().ObjC1)
+ if (SemaRef.getLangOpts().ObjC)
AddObjCStatementResults(Results, true);
if (Results.includeCodePatterns()) {
@@ -1793,7 +1904,8 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
// Switch-specific statements.
- if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ if (SemaRef.getCurFunction() &&
+ !SemaRef.getCurFunction()->SwitchStack.empty()) {
// case expression:
Builder.AddTypedTextChunk("case");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -1869,10 +1981,9 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// "return expression ;" or "return ;", depending on whether we
// know the function is void or not.
bool isVoid = false;
- if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
+ if (const auto *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
isVoid = Function->getReturnType()->isVoidType();
- else if (ObjCMethodDecl *Method
- = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
isVoid = Method->getReturnType()->isVoidType();
else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
@@ -1900,7 +2011,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
AddStaticAssertResult(Builder, Results, SemaRef.getLangOpts());
}
- LLVM_FALLTHROUGH;
+ LLVM_FALLTHROUGH;
// Fall through (for statement expressions).
case Sema::PCC_ForInit:
@@ -2087,7 +2198,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
}
}
- if (SemaRef.getLangOpts().ObjC1) {
+ if (SemaRef.getLangOpts().ObjC) {
// Add "super", if we're in an Objective-C class with a superclass.
if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
// The interface can be NULL.
@@ -2146,8 +2257,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
const PrintingPolicy &Policy,
- const NamedDecl *ND,
- QualType BaseType,
+ const NamedDecl *ND, QualType BaseType,
CodeCompletionBuilder &Result) {
if (!ND)
return;
@@ -2161,24 +2271,24 @@ static void AddResultTypeChunk(ASTContext &Context,
QualType T;
if (const FunctionDecl *Function = ND->getAsFunction())
T = Function->getReturnType();
- else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND)) {
if (!BaseType.isNull())
T = Method->getSendResultType(BaseType);
else
T = Method->getReturnType();
- } else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
+ } else if (const auto *Enumerator = dyn_cast<EnumConstantDecl>(ND)) {
T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
T = clang::TypeName::getFullyQualifiedType(T, Context);
} else if (isa<UnresolvedUsingValueDecl>(ND)) {
/* Do nothing: ignore unresolved using declarations*/
- } else if (const ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
+ } else if (const auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) {
if (!BaseType.isNull())
T = Ivar->getUsageType(BaseType);
else
T = Ivar->getType();
- } else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND)) {
+ } else if (const auto *Value = dyn_cast<ValueDecl>(ND)) {
T = Value->getType();
- } else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND)) {
+ } else if (const auto *Property = dyn_cast<ObjCPropertyDecl>(ND)) {
if (!BaseType.isNull())
T = Property->getUsageType(BaseType);
else
@@ -2188,8 +2298,8 @@ static void AddResultTypeChunk(ASTContext &Context,
if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
return;
- Result.AddResultTypeChunk(GetCompletionTypeString(T, Context, Policy,
- Result.getAllocator()));
+ Result.AddResultTypeChunk(
+ GetCompletionTypeString(T, Context, Policy, Result.getAllocator()));
}
static void MaybeAddSentinel(Preprocessor &PP,
@@ -2197,7 +2307,7 @@ static void MaybeAddSentinel(Preprocessor &PP,
CodeCompletionBuilder &Result) {
if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
if (Sentinel->getSentinel() == 0) {
- if (PP.getLangOpts().ObjC1 && PP.isMacroDefined("nil"))
+ if (PP.getLangOpts().ObjC && PP.isMacroDefined("nil"))
Result.AddTextChunk(", nil");
else if (PP.isMacroDefined("NULL"))
Result.AddTextChunk(", NULL");
@@ -2295,11 +2405,10 @@ formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
bool SuppressBlock = false,
Optional<ArrayRef<QualType>> ObjCSubsts = None);
-static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
- const ParmVarDecl *Param,
- bool SuppressName = false,
- bool SuppressBlock = false,
- Optional<ArrayRef<QualType>> ObjCSubsts = None) {
+static std::string
+FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
+ bool SuppressName = false, bool SuppressBlock = false,
+ Optional<ArrayRef<QualType>> ObjCSubsts = None) {
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -2315,8 +2424,8 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
ObjCSubstitutionContext::Parameter);
if (ObjCMethodParam) {
- Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
- Type);
+ Result =
+ "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
Result += Type.getAsString(Policy) + ")";
if (Param->getIdentifier() && !SuppressName)
Result += Param->getIdentifier()->getName();
@@ -2447,13 +2556,15 @@ static std::string GetDefaultValueString(const ParmVarDecl *Param,
bool Invalid = CharSrcRange.isInvalid();
if (Invalid)
return "";
- StringRef srcText = Lexer::getSourceText(CharSrcRange, SM, LangOpts, &Invalid);
+ StringRef srcText =
+ Lexer::getSourceText(CharSrcRange, SM, LangOpts, &Invalid);
if (Invalid)
return "";
if (srcText.empty() || srcText == "=") {
// Lexer can't determine the value.
- // This happens if the code is incorrect (for example class is forward declared).
+ // This happens if the code is incorrect (for example, the class is
+ // forward-declared).
return "";
}
std::string DefValue(srcText.str());
@@ -2461,7 +2572,8 @@ static std::string GetDefaultValueString(const ParmVarDecl *Param,
// this value always has (or always does not have) '=' in front of it
if (DefValue.at(0) != '=') {
// If we don't have '=' in front of value.
- // Lexer returns built-in types values without '=' and user-defined types values with it.
+ // The Lexer returns values of built-in types without '=' and values of
+ // user-defined types with it.
return " = " + DefValue;
}
return " " + DefValue;
@@ -2501,18 +2613,18 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
// Format the placeholder string.
std::string PlaceholderStr = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
- PlaceholderStr += GetDefaultValueString(Param, PP.getSourceManager(), PP.getLangOpts());
+ PlaceholderStr +=
+ GetDefaultValueString(Param, PP.getSourceManager(), PP.getLangOpts());
if (Function->isVariadic() && P == N - 1)
PlaceholderStr += ", ...";
// Add the placeholder string.
Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString(PlaceholderStr));
+ Result.getAllocator().CopyString(PlaceholderStr));
}
- if (const FunctionProtoType *Proto
- = Function->getType()->getAs<FunctionProtoType>())
+ if (const auto *Proto = Function->getType()->getAs<FunctionProtoType>())
if (Proto->isVariadic()) {
if (Proto->getNumParams() == 0)
Result.AddPlaceholderChunk("...");
@@ -2522,13 +2634,10 @@ static void AddFunctionParameterChunks(Preprocessor &PP,
}
/// Add template parameter chunks to the given code completion string.
-static void AddTemplateParameterChunks(ASTContext &Context,
- const PrintingPolicy &Policy,
- const TemplateDecl *Template,
- CodeCompletionBuilder &Result,
- unsigned MaxParameters = 0,
- unsigned Start = 0,
- bool InDefaultArg = false) {
+static void AddTemplateParameterChunks(
+ ASTContext &Context, const PrintingPolicy &Policy,
+ const TemplateDecl *Template, CodeCompletionBuilder &Result,
+ unsigned MaxParameters = 0, unsigned Start = 0, bool InDefaultArg = false) {
bool FirstParameter = true;
// Prefer to take the template parameter names from the first declaration of
@@ -2539,8 +2648,8 @@ static void AddTemplateParameterChunks(ASTContext &Context,
TemplateParameterList::iterator PEnd = Params->end();
if (MaxParameters)
PEnd = Params->begin() + MaxParameters;
- for (TemplateParameterList::iterator P = Params->begin() + Start;
- P != PEnd; ++P) {
+ for (TemplateParameterList::iterator P = Params->begin() + Start; P != PEnd;
+ ++P) {
bool HasDefaultArg = false;
std::string PlaceholderStr;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
@@ -2555,8 +2664,8 @@ static void AddTemplateParameterChunks(ASTContext &Context,
}
HasDefaultArg = TTP->hasDefaultArgument();
- } else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
@@ -2598,18 +2707,17 @@ static void AddTemplateParameterChunks(ASTContext &Context,
// Add the placeholder string.
Result.AddPlaceholderChunk(
- Result.getAllocator().CopyString(PlaceholderStr));
+ Result.getAllocator().CopyString(PlaceholderStr));
}
}
/// Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
-static void
-AddQualifierToCompletionString(CodeCompletionBuilder &Result,
- NestedNameSpecifier *Qualifier,
- bool QualifierIsInformative,
- ASTContext &Context,
- const PrintingPolicy &Policy) {
+static void AddQualifierToCompletionString(CodeCompletionBuilder &Result,
+ NestedNameSpecifier *Qualifier,
+ bool QualifierIsInformative,
+ ASTContext &Context,
+ const PrintingPolicy &Policy) {
if (!Qualifier)
return;
@@ -2627,25 +2735,24 @@ AddQualifierToCompletionString(CodeCompletionBuilder &Result,
static void
AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
const FunctionDecl *Function) {
- const FunctionProtoType *Proto
- = Function->getType()->getAs<FunctionProtoType>();
+ const auto *Proto = Function->getType()->getAs<FunctionProtoType>();
if (!Proto || !Proto->getTypeQuals())
return;
// FIXME: Add ref-qualifier!
// Handle single qualifiers without copying
- if (Proto->getTypeQuals() == Qualifiers::Const) {
+ if (Proto->getTypeQuals().hasOnlyConst()) {
Result.AddInformativeChunk(" const");
return;
}
- if (Proto->getTypeQuals() == Qualifiers::Volatile) {
+ if (Proto->getTypeQuals().hasOnlyVolatile()) {
Result.AddInformativeChunk(" volatile");
return;
}
- if (Proto->getTypeQuals() == Qualifiers::Restrict) {
+ if (Proto->getTypeQuals().hasOnlyRestrict()) {
Result.AddInformativeChunk(" restrict");
return;
}
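// Illustrative sketch, not part of the change above: getTypeQuals() now
// yields a clang::Qualifiers value instead of a raw cvr mask, so callers can
// use its query methods directly. The helper name describeQuals is made up.
void describeQuals(const FunctionProtoType *Proto, llvm::raw_ostream &OS) {
  Qualifiers Quals = Proto->getTypeQuals();
  if (Quals.hasConst())
    OS << " const";
  if (Quals.hasVolatile())
    OS << " volatile";
  if (Quals.hasRestrict())
    OS << " restrict";
}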
@@ -2670,37 +2777,51 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
return;
switch (Name.getNameKind()) {
- case DeclarationName::CXXOperatorName: {
- const char *OperatorName = nullptr;
- switch (Name.getCXXOverloadedOperator()) {
- case OO_None:
- case OO_Conditional:
- case NUM_OVERLOADED_OPERATORS:
- OperatorName = "operator";
- break;
+ case DeclarationName::CXXOperatorName: {
+ const char *OperatorName = nullptr;
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ OperatorName = "operator";
+ break;
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- case OO_##Name: OperatorName = "operator" Spelling; break;
-#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ case OO_##Name: \
+ OperatorName = "operator" Spelling; \
+ break;
+#define OVERLOADED_OPERATOR_MULTI(Name, Spelling, Unary, Binary, MemberOnly)
#include "clang/Basic/OperatorKinds.def"
- case OO_New: OperatorName = "operator new"; break;
- case OO_Delete: OperatorName = "operator delete"; break;
- case OO_Array_New: OperatorName = "operator new[]"; break;
- case OO_Array_Delete: OperatorName = "operator delete[]"; break;
- case OO_Call: OperatorName = "operator()"; break;
- case OO_Subscript: OperatorName = "operator[]"; break;
- }
- Result.AddTypedTextChunk(OperatorName);
+ case OO_New:
+ OperatorName = "operator new";
+ break;
+ case OO_Delete:
+ OperatorName = "operator delete";
+ break;
+ case OO_Array_New:
+ OperatorName = "operator new[]";
+ break;
+ case OO_Array_Delete:
+ OperatorName = "operator delete[]";
+ break;
+ case OO_Call:
+ OperatorName = "operator()";
+ break;
+ case OO_Subscript:
+ OperatorName = "operator[]";
break;
}
+ Result.AddTypedTextChunk(OperatorName);
+ break;
+ }
case DeclarationName::Identifier:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXLiteralOperatorName:
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
break;
case DeclarationName::CXXDeductionGuideName:
@@ -2713,19 +2834,18 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
case DeclarationName::CXXConstructorName: {
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
- if (const RecordType *RecordTy = Ty->getAs<RecordType>())
+ if (const auto *RecordTy = Ty->getAs<RecordType>())
Record = cast<CXXRecordDecl>(RecordTy->getDecl());
- else if (const InjectedClassNameType *InjectedTy
- = Ty->getAs<InjectedClassNameType>())
+ else if (const auto *InjectedTy = Ty->getAs<InjectedClassNameType>())
Record = InjectedTy->getDecl();
else {
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
break;
}
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(Record->getNameAsString()));
+ Result.getAllocator().CopyString(Record->getNameAsString()));
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Context, Policy, Template, Result);
@@ -2736,11 +2856,10 @@ static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
}
}
-CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
- const CodeCompletionContext &CCContext,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) {
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(
+ Sema &S, const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
return CreateCodeCompletionString(S.Context, S.PP, CCContext, Allocator,
CCTUInfo, IncludeBriefComments);
}
@@ -2797,13 +2916,10 @@ CodeCompletionString *CodeCompletionResult::CreateCodeCompletionStringForMacro(
/// \returns Either a new, heap-allocated code completion string describing
/// how to use this result, or NULL to indicate that the string or name of the
/// result is all that is needed.
-CodeCompletionString *
-CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
- Preprocessor &PP,
- const CodeCompletionContext &CCContext,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) {
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(
+ ASTContext &Ctx, Preprocessor &PP, const CodeCompletionContext &CCContext,
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments) {
if (Kind == RK_Macro)
return CreateCodeCompletionStringForMacro(PP, Allocator, CCTUInfo);
@@ -2832,6 +2948,30 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
assert(Kind == RK_Declaration && "Missed a result kind?");
+ return createCodeCompletionStringForDecl(
+ PP, Ctx, Result, IncludeBriefComments, CCContext, Policy);
+}
+
+CodeCompletionString *
+CodeCompletionResult::createCodeCompletionStringForOverride(
+ Preprocessor &PP, ASTContext &Ctx, CodeCompletionBuilder &Result,
+ bool IncludeBriefComments, const CodeCompletionContext &CCContext,
+ PrintingPolicy &Policy) {
+ std::string OverrideSignature;
+ llvm::raw_string_ostream OS(OverrideSignature);
+ auto *CCS = createCodeCompletionStringForDecl(PP, Ctx, Result,
+ /*IncludeBriefComments=*/false,
+ CCContext, Policy);
+ printOverrideString(OS, CCS);
+ OS << " override";
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(OS.str()));
+ return Result.TakeString();
+}
+
+CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
+ Preprocessor &PP, ASTContext &Ctx, CodeCompletionBuilder &Result,
+ bool IncludeBriefComments, const CodeCompletionContext &CCContext,
+ PrintingPolicy &Policy) {
const NamedDecl *ND = Declaration;
Result.addParentContext(ND->getDeclContext());
@@ -2844,7 +2984,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (StartsNestedNameSpecifier) {
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
Result.AddTextChunk("::");
return Result.TakeString();
}
@@ -2854,7 +2994,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
AddResultTypeChunk(Ctx, Policy, ND, CCContext.getBaseType(), Result);
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
+ if (const auto *Function = dyn_cast<FunctionDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
AddTypedNameChunk(Ctx, Policy, ND, Result);
@@ -2865,7 +3005,8 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
- if (const FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
+ if (const FunctionTemplateDecl *FunTmpl =
+ dyn_cast<FunctionTemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
@@ -2884,16 +3025,16 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
// FIXME: We need to abstract template parameters better!
bool HasDefaultArg = false;
NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
- LastDeducibleArgument - 1);
+ LastDeducibleArgument - 1);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
HasDefaultArg = TTP->hasDefaultArgument();
- else if (NonTypeTemplateParmDecl *NTTP
- = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param))
HasDefaultArg = NTTP->hasDefaultArgument();
else {
assert(isa<TemplateTemplateParmDecl>(Param));
- HasDefaultArg
- = cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
+ HasDefaultArg =
+ cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
}
if (!HasDefaultArg)
@@ -2919,22 +3060,21 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
return Result.TakeString();
}
- if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
+ if (const auto *Template = dyn_cast<TemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(Template->getNameAsString()));
+ Result.getAllocator().CopyString(Template->getNameAsString()));
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Ctx, Policy, Template, Result);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
return Result.TakeString();
}
-
- if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ if (const auto *Method = dyn_cast<ObjCMethodDecl>(ND)) {
Selector Sel = Method->getSelector();
if (Sel.isUnarySelector()) {
- Result.AddTypedTextChunk(Result.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Sel.getNameForSlot(0)));
return Result.TakeString();
}
@@ -2952,7 +3092,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
}
unsigned Idx = 0;
for (ObjCMethodDecl::param_const_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
+ PEnd = Method->param_end();
P != PEnd; (void)++P, ++Idx) {
if (Idx > 0) {
std::string Keyword;
@@ -2979,12 +3119,11 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
if (ParamType->isBlockPointerType() && !DeclaringEntity)
Arg = FormatFunctionParameter(Policy, *P, true,
- /*SuppressBlock=*/false,
- ObjCSubsts);
+ /*SuppressBlock=*/false, ObjCSubsts);
else {
if (ObjCSubsts)
- ParamType = ParamType.substObjCTypeArgs(Ctx, *ObjCSubsts,
- ObjCSubstitutionContext::Parameter);
+ ParamType = ParamType.substObjCTypeArgs(
+ Ctx, *ObjCSubsts, ObjCSubstitutionContext::Parameter);
Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(),
ParamType);
Arg += ParamType.getAsString(Policy) + ")";
@@ -3025,7 +3164,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Ctx, Policy);
Result.AddTypedTextChunk(
- Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.getAllocator().CopyString(ND->getNameAsString()));
return Result.TakeString();
}
@@ -3037,7 +3176,7 @@ const RawComment *clang::getCompletionComment(const ASTContext &Ctx,
return RC;
// Try to find comment from a property for ObjC methods.
- const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND);
+ const auto *M = dyn_cast<ObjCMethodDecl>(ND);
if (!M)
return nullptr;
const ObjCPropertyDecl *PDecl = M->findPropertyDecl();
@@ -3049,7 +3188,7 @@ const RawComment *clang::getCompletionComment(const ASTContext &Ctx,
const RawComment *clang::getPatternCompletionComment(const ASTContext &Ctx,
const NamedDecl *ND) {
- const ObjCMethodDecl *M = dyn_cast_or_null<ObjCMethodDecl>(ND);
+ const auto *M = dyn_cast_or_null<ObjCMethodDecl>(ND);
if (!M || !M->isPropertyAccessor())
return nullptr;
@@ -3072,8 +3211,7 @@ const RawComment *clang::getPatternCompletionComment(const ASTContext &Ctx,
const RawComment *clang::getParameterComment(
const ASTContext &Ctx,
- const CodeCompleteConsumer::OverloadCandidate &Result,
- unsigned ArgIndex) {
+ const CodeCompleteConsumer::OverloadCandidate &Result, unsigned ArgIndex) {
auto FDecl = Result.getFunction();
if (!FDecl)
return nullptr;
@@ -3089,12 +3227,11 @@ static void AddOverloadParameterChunks(ASTContext &Context,
const FunctionDecl *Function,
const FunctionProtoType *Prototype,
CodeCompletionBuilder &Result,
- unsigned CurrentArg,
- unsigned Start = 0,
+ unsigned CurrentArg, unsigned Start = 0,
bool InOptional = false) {
bool FirstParameter = true;
- unsigned NumParams = Function ? Function->getNumParams()
- : Prototype->getNumParams();
+ unsigned NumParams =
+ Function ? Function->getNumParams() : Prototype->getNumParams();
for (unsigned P = Start; P != NumParams; ++P) {
if (Function && Function->getParamDecl(P)->hasDefaultArg() && !InOptional) {
@@ -3124,14 +3261,15 @@ static void AddOverloadParameterChunks(ASTContext &Context,
const ParmVarDecl *Param = Function->getParamDecl(P);
Placeholder = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
- Placeholder += GetDefaultValueString(Param, Context.getSourceManager(), Context.getLangOpts());
+ Placeholder += GetDefaultValueString(Param, Context.getSourceManager(),
+ Context.getLangOpts());
} else {
Placeholder = Prototype->getParamType(P).getAsString(Policy);
}
if (P == CurrentArg)
Result.AddCurrentParameterChunk(
- Result.getAllocator().CopyString(Placeholder));
+ Result.getAllocator().CopyString(Placeholder));
else
Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder));
}
@@ -3153,23 +3291,22 @@ static void AddOverloadParameterChunks(ASTContext &Context,
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
- unsigned CurrentArg, Sema &S,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) const {
+ unsigned CurrentArg, Sema &S, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments) const {
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
// FIXME: Set priority, availability appropriately.
- CodeCompletionBuilder Result(Allocator,CCTUInfo, 1, CXAvailability_Available);
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
+ CXAvailability_Available);
FunctionDecl *FDecl = getFunction();
- const FunctionProtoType *Proto
- = dyn_cast<FunctionProtoType>(getFunctionType());
+ const FunctionProtoType *Proto =
+ dyn_cast<FunctionProtoType>(getFunctionType());
if (!FDecl && !Proto) {
// Function without a prototype. Just give the return type and a
// highlighted ellipsis.
const FunctionType *FT = getFunctionType();
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- FT->getReturnType().getAsString(Policy)));
+ FT->getReturnType().getAsString(Policy)));
Result.AddChunk(CodeCompletionString::CK_LeftParen);
Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
Result.AddChunk(CodeCompletionString::CK_RightParen);
@@ -3183,10 +3320,9 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
}
AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
Result.AddTextChunk(
- Result.getAllocator().CopyString(FDecl->getNameAsString()));
+ Result.getAllocator().CopyString(FDecl->getNameAsString()));
} else {
- Result.AddResultTypeChunk(
- Result.getAllocator().CopyString(
+ Result.AddResultTypeChunk(Result.getAllocator().CopyString(
Proto->getReturnType().getAsString(Policy)));
}
@@ -3216,8 +3352,7 @@ unsigned clang::getMacroUsagePriority(StringRef MacroName,
Priority = CCP_Constant;
// Treat "bool" as a type.
else if (MacroName.equals("bool"))
- Priority = CCP_Type + (LangOpts.ObjC1? CCD_bool_in_ObjC : 0);
-
+ Priority = CCP_Type + (LangOpts.ObjC ? CCD_bool_in_ObjC : 0);
return Priority;
}
@@ -3227,105 +3362,142 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
return CXCursor_UnexposedDecl;
switch (D->getKind()) {
- case Decl::Enum: return CXCursor_EnumDecl;
- case Decl::EnumConstant: return CXCursor_EnumConstantDecl;
- case Decl::Field: return CXCursor_FieldDecl;
- case Decl::Function:
- return CXCursor_FunctionDecl;
- case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
- case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
- case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
-
- case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
- case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
- case Decl::ObjCMethod:
- return cast<ObjCMethodDecl>(D)->isInstanceMethod()
- ? CXCursor_ObjCInstanceMethodDecl : CXCursor_ObjCClassMethodDecl;
- case Decl::CXXMethod: return CXCursor_CXXMethod;
- case Decl::CXXConstructor: return CXCursor_Constructor;
- case Decl::CXXDestructor: return CXCursor_Destructor;
- case Decl::CXXConversion: return CXCursor_ConversionFunction;
- case Decl::ObjCProperty: return CXCursor_ObjCPropertyDecl;
- case Decl::ObjCProtocol: return CXCursor_ObjCProtocolDecl;
- case Decl::ParmVar: return CXCursor_ParmDecl;
- case Decl::Typedef: return CXCursor_TypedefDecl;
- case Decl::TypeAlias: return CXCursor_TypeAliasDecl;
- case Decl::TypeAliasTemplate: return CXCursor_TypeAliasTemplateDecl;
- case Decl::Var: return CXCursor_VarDecl;
- case Decl::Namespace: return CXCursor_Namespace;
- case Decl::NamespaceAlias: return CXCursor_NamespaceAlias;
- case Decl::TemplateTypeParm: return CXCursor_TemplateTypeParameter;
- case Decl::NonTypeTemplateParm:return CXCursor_NonTypeTemplateParameter;
- case Decl::TemplateTemplateParm:return CXCursor_TemplateTemplateParameter;
- case Decl::FunctionTemplate: return CXCursor_FunctionTemplate;
- case Decl::ClassTemplate: return CXCursor_ClassTemplate;
- case Decl::AccessSpec: return CXCursor_CXXAccessSpecifier;
- case Decl::ClassTemplatePartialSpecialization:
- return CXCursor_ClassTemplatePartialSpecialization;
- case Decl::UsingDirective: return CXCursor_UsingDirective;
- case Decl::StaticAssert: return CXCursor_StaticAssert;
- case Decl::Friend: return CXCursor_FriendDecl;
- case Decl::TranslationUnit: return CXCursor_TranslationUnit;
-
- case Decl::Using:
- case Decl::UnresolvedUsingValue:
- case Decl::UnresolvedUsingTypename:
- return CXCursor_UsingDeclaration;
-
- case Decl::ObjCPropertyImpl:
- switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
- case ObjCPropertyImplDecl::Dynamic:
- return CXCursor_ObjCDynamicDecl;
-
- case ObjCPropertyImplDecl::Synthesize:
- return CXCursor_ObjCSynthesizeDecl;
- }
+ case Decl::Enum:
+ return CXCursor_EnumDecl;
+ case Decl::EnumConstant:
+ return CXCursor_EnumConstantDecl;
+ case Decl::Field:
+ return CXCursor_FieldDecl;
+ case Decl::Function:
+ return CXCursor_FunctionDecl;
+ case Decl::ObjCCategory:
+ return CXCursor_ObjCCategoryDecl;
+ case Decl::ObjCCategoryImpl:
+ return CXCursor_ObjCCategoryImplDecl;
+ case Decl::ObjCImplementation:
+ return CXCursor_ObjCImplementationDecl;
+
+ case Decl::ObjCInterface:
+ return CXCursor_ObjCInterfaceDecl;
+ case Decl::ObjCIvar:
+ return CXCursor_ObjCIvarDecl;
+ case Decl::ObjCMethod:
+ return cast<ObjCMethodDecl>(D)->isInstanceMethod()
+ ? CXCursor_ObjCInstanceMethodDecl
+ : CXCursor_ObjCClassMethodDecl;
+ case Decl::CXXMethod:
+ return CXCursor_CXXMethod;
+ case Decl::CXXConstructor:
+ return CXCursor_Constructor;
+ case Decl::CXXDestructor:
+ return CXCursor_Destructor;
+ case Decl::CXXConversion:
+ return CXCursor_ConversionFunction;
+ case Decl::ObjCProperty:
+ return CXCursor_ObjCPropertyDecl;
+ case Decl::ObjCProtocol:
+ return CXCursor_ObjCProtocolDecl;
+ case Decl::ParmVar:
+ return CXCursor_ParmDecl;
+ case Decl::Typedef:
+ return CXCursor_TypedefDecl;
+ case Decl::TypeAlias:
+ return CXCursor_TypeAliasDecl;
+ case Decl::TypeAliasTemplate:
+ return CXCursor_TypeAliasTemplateDecl;
+ case Decl::Var:
+ return CXCursor_VarDecl;
+ case Decl::Namespace:
+ return CXCursor_Namespace;
+ case Decl::NamespaceAlias:
+ return CXCursor_NamespaceAlias;
+ case Decl::TemplateTypeParm:
+ return CXCursor_TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:
+ return CXCursor_NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:
+ return CXCursor_TemplateTemplateParameter;
+ case Decl::FunctionTemplate:
+ return CXCursor_FunctionTemplate;
+ case Decl::ClassTemplate:
+ return CXCursor_ClassTemplate;
+ case Decl::AccessSpec:
+ return CXCursor_CXXAccessSpecifier;
+ case Decl::ClassTemplatePartialSpecialization:
+ return CXCursor_ClassTemplatePartialSpecialization;
+ case Decl::UsingDirective:
+ return CXCursor_UsingDirective;
+ case Decl::StaticAssert:
+ return CXCursor_StaticAssert;
+ case Decl::Friend:
+ return CXCursor_FriendDecl;
+ case Decl::TranslationUnit:
+ return CXCursor_TranslationUnit;
+
+ case Decl::Using:
+ case Decl::UnresolvedUsingValue:
+ case Decl::UnresolvedUsingTypename:
+ return CXCursor_UsingDeclaration;
+
+ case Decl::ObjCPropertyImpl:
+ switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
+ case ObjCPropertyImplDecl::Dynamic:
+ return CXCursor_ObjCDynamicDecl;
+
+ case ObjCPropertyImplDecl::Synthesize:
+ return CXCursor_ObjCSynthesizeDecl;
+ }
+ llvm_unreachable("Unexpected Kind!");
+
+ case Decl::Import:
+ return CXCursor_ModuleImportDecl;
+
+ case Decl::ObjCTypeParam:
+ return CXCursor_TemplateTypeParameter;
- case Decl::Import:
- return CXCursor_ModuleImportDecl;
-
- case Decl::ObjCTypeParam: return CXCursor_TemplateTypeParameter;
-
- default:
- if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
- switch (TD->getTagKind()) {
- case TTK_Interface: // fall through
- case TTK_Struct: return CXCursor_StructDecl;
- case TTK_Class: return CXCursor_ClassDecl;
- case TTK_Union: return CXCursor_UnionDecl;
- case TTK_Enum: return CXCursor_EnumDecl;
- }
+ default:
+ if (const auto *TD = dyn_cast<TagDecl>(D)) {
+ switch (TD->getTagKind()) {
+ case TTK_Interface: // fall through
+ case TTK_Struct:
+ return CXCursor_StructDecl;
+ case TTK_Class:
+ return CXCursor_ClassDecl;
+ case TTK_Union:
+ return CXCursor_UnionDecl;
+ case TTK_Enum:
+ return CXCursor_EnumDecl;
}
+ }
}
return CXCursor_UnexposedDecl;
}
static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
- bool IncludeUndefined,
+ bool LoadExternal, bool IncludeUndefined,
bool TargetTypeIsPointer = false) {
typedef CodeCompletionResult Result;
Results.EnterNewScope();
- for (Preprocessor::macro_iterator M = PP.macro_begin(),
- MEnd = PP.macro_end();
+ for (Preprocessor::macro_iterator M = PP.macro_begin(LoadExternal),
+ MEnd = PP.macro_end(LoadExternal);
M != MEnd; ++M) {
auto MD = PP.getMacroDefinition(M->first);
if (IncludeUndefined || MD) {
- if (MacroInfo *MI = MD.getMacroInfo())
- if (MI->isUsedForHeaderGuard())
- continue;
+ MacroInfo *MI = MD.getMacroInfo();
+ if (MI && MI->isUsedForHeaderGuard())
+ continue;
- Results.AddResult(Result(M->first,
- getMacroUsagePriority(M->first->getName(),
- PP.getLangOpts(),
- TargetTypeIsPointer)));
+ Results.AddResult(
+ Result(M->first, MI,
+ getMacroUsagePriority(M->first->getName(), PP.getLangOpts(),
+ TargetTypeIsPointer)));
}
}
Results.ExitScope();
-
}
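// Illustrative sketch, not part of the change above: a hypothetical caller
// using the new LoadExternal parameter to skip deserializing macros from a
// precompiled header or module, trading completeness of the macro results
// for speed. The helper name AddLocalMacroResults is made up.
static void AddLocalMacroResults(Preprocessor &PP, ResultBuilder &Results) {
  AddMacroResults(PP, Results, /*LoadExternal=*/false,
                  /*IncludeUndefined=*/false);
}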
static void AddPrettyFunctionResults(const LangOptions &LangOpts,
@@ -3350,8 +3522,8 @@ static void HandleCodeCompleteResults(Sema *S,
CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
}
-static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
- Sema::ParserCompletionContext PCC) {
+static CodeCompletionContext
+mapCodeCompletionContext(Sema &S, Sema::ParserCompletionContext PCC) {
switch (PCC) {
case Sema::PCC_Namespace:
return CodeCompletionContext::CCC_TopLevel;
@@ -3381,14 +3553,16 @@ static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
case Sema::PCC_ForInit:
if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
- S.getLangOpts().ObjC1)
+ S.getLangOpts().ObjC)
return CodeCompletionContext::CCC_ParenthesizedExpression;
else
return CodeCompletionContext::CCC_Expression;
case Sema::PCC_Expression:
- case Sema::PCC_Condition:
return CodeCompletionContext::CCC_Expression;
+ case Sema::PCC_Condition:
+ return CodeCompletionContext(CodeCompletionContext::CCC_Expression,
+ S.getASTContext().BoolTy);
case Sema::PCC_Statement:
return CodeCompletionContext::CCC_Statement;
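// Illustrative sketch, not part of the change above: because PCC_Condition
// now maps to a CodeCompletionContext carrying BoolTy as its preferred type,
// boolean-returning candidates are ranked higher inside conditions. The
// functions below are made up.
bool isReady();
int itemCount();
void use() {
  // At the '(' of this if, completion now prefers isReady() over itemCount().
  if (isReady()) {
  }
}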
@@ -3421,7 +3595,6 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
while (isa<BlockDecl>(CurContext))
CurContext = CurContext->getParent();
-
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
if (!Method || !Method->isVirtual())
return;
@@ -3441,9 +3614,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
// If we need a nested-name-specifier, add one now.
if (!InContext) {
- NestedNameSpecifier *NNS
- = getRequiredQualification(S.Context, CurContext,
- Overridden->getDeclContext());
+ NestedNameSpecifier *NNS = getRequiredQualification(
+ S.Context, CurContext, Overridden->getDeclContext());
if (NNS) {
std::string Str;
llvm::raw_string_ostream OS(Str);
@@ -3453,8 +3625,8 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
} else if (!InContext->Equals(Overridden->getDeclContext()))
continue;
- Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
- Overridden->getNameAsString()));
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(Overridden->getNameAsString()));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
bool FirstParam = true;
for (auto P : Method->parameters()) {
@@ -3467,11 +3639,9 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
Results.getAllocator().CopyString(P->getIdentifier()->getName()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- CCP_SuperCompletion,
- CXCursor_CXXMethod,
- CXAvailability_Available,
- Overridden));
+ Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(), CCP_SuperCompletion, CXCursor_CXXMethod,
+ CXAvailability_Available, Overridden));
Results.Ignore(Overridden);
}
}
@@ -3493,39 +3663,35 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
PP.getHeaderSearchInfo().collectAllModules(Modules);
for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Modules[I]->Name));
- Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
- CXCursor_ModuleImportDecl,
- Modules[I]->isAvailable()
- ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Builder.getAllocator().CopyString(Modules[I]->Name));
+ Results.AddResult(Result(
+ Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
+ Modules[I]->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
} else if (getLangOpts().Modules) {
// Load the named module.
- Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
- Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ Module *Mod =
+ PP.getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
+ /*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
+ SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString((*Sub)->Name));
- Results.AddResult(Result(Builder.TakeString(),
- CCP_Declaration,
- CXCursor_ModuleImportDecl,
- (*Sub)->isAvailable()
- ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Builder.getAllocator().CopyString((*Sub)->Name));
+ Results.AddResult(Result(
+ Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
+ (*Sub)->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
}
}
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
void Sema::CodeCompleteOrdinaryName(Scope *S,
@@ -3572,10 +3738,9 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
// If we are in a C++ non-static member function, check the qualifiers on
// the member function to filter/prioritize the results list.
- if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext))
- if (CurMethod->isInstance())
- Results.setObjectTypeQualifiers(
- Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));
+ auto ThisType = getCurrentThisType();
+ if (!ThisType.isNull())
+ Results.setObjectTypeQualifiers(ThisType->getPointeeType().getQualifiers());
CodeCompletionDeclConsumer Consumer(Results, CurContext);
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
@@ -3609,28 +3774,30 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
}
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, false);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- bool IsSuper,
+ bool AtArgumentExpression, bool IsSuper,
ResultBuilder &Results);
void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- AllowNestedNameSpecifiers
- ? CodeCompletionContext::CCC_PotentiallyQualifiedName
- : CodeCompletionContext::CCC_Name);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ AllowNestedNameSpecifiers
+ // FIXME: Try to separate codepath leading here to deduce whether we
+ // need an existing symbol or a new one.
+ ? CodeCompletionContext::CCC_SymbolOrNewName
+ : CodeCompletionContext::CCC_NewName);
Results.EnterNewScope();
// Type qualifiers can come after names.
@@ -3671,12 +3838,11 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
DS.getTypeSpecType() == DeclSpec::TST_typename &&
DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
- !DS.isTypeAltiVecVector() &&
- S &&
+ !DS.isTypeAltiVecVector() && S &&
(S->getFlags() & Scope::DeclScope) != 0 &&
(S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
- Scope::FunctionPrototypeScope |
- Scope::AtCatchScope)) == 0) {
+ Scope::FunctionPrototypeScope | Scope::AtCatchScope)) ==
+ 0) {
ParsedType T = DS.getRepAsType();
if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
AddClassMessageCompletions(*this, S, T, None, false, false, Results);
@@ -3685,15 +3851,14 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
// Note that we intentionally suppress macro results here, since we do not
// encourage using macros to produce the names of entities.
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
struct Sema::CodeCompleteExpressionData {
CodeCompleteExpressionData(QualType PreferredType = QualType())
- : PreferredType(PreferredType), IntegralConstantExpression(false),
- ObjCCollection(false) { }
+ : PreferredType(PreferredType), IntegralConstantExpression(false),
+ ObjCCollection(false) {}
QualType PreferredType;
bool IntegralConstantExpression;
@@ -3737,31 +3902,35 @@ void Sema::CodeCompleteExpression(Scope *S,
bool PreferredTypeIsPointer = false;
if (!Data.PreferredType.isNull())
- PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
- || Data.PreferredType->isMemberPointerType()
- || Data.PreferredType->isBlockPointerType();
+ PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType() ||
+ Data.PreferredType->isMemberPointerType() ||
+ Data.PreferredType->isBlockPointerType();
- if (S->getFnParent() &&
- !Data.ObjCCollection &&
+ if (S->getFnParent() && !Data.ObjCCollection &&
!Data.IntegralConstantExpression)
AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false,
+ PreferredTypeIsPointer);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
+void Sema::CodeCompleteExpression(Scope *S, QualType PreferredType) {
+ return CodeCompleteExpression(S, CodeCompleteExpressionData(PreferredType));
+}
+
void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
if (E.isInvalid())
CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
- else if (getLangOpts().ObjC1)
+ else if (getLangOpts().ObjC)
CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
}
/// The set of properties that have already been added, referenced by
/// property name.
-typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
+typedef llvm::SmallPtrSet<IdentifierInfo *, 16> AddedPropertiesSet;
/// Retrieve the container definition, if any?
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
@@ -3819,11 +3988,13 @@ static void AddObjCBlockCall(ASTContext &Context, const PrintingPolicy &Policy,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
-static void AddObjCProperties(
- const CodeCompletionContext &CCContext, ObjCContainerDecl *Container,
- bool AllowCategories, bool AllowNullaryMethods, DeclContext *CurContext,
- AddedPropertiesSet &AddedProperties, ResultBuilder &Results,
- bool IsBaseExprStatement = false, bool IsClassProperty = false) {
+static void
+AddObjCProperties(const CodeCompletionContext &CCContext,
+ ObjCContainerDecl *Container, bool AllowCategories,
+ bool AllowNullaryMethods, DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties, ResultBuilder &Results,
+ bool IsBaseExprStatement = false,
+ bool IsClassProperty = false, bool InOriginalClass = true) {
typedef CodeCompletionResult Result;
// Retrieve the definition.
@@ -3838,8 +4009,10 @@ static void AddObjCProperties(
// expressions.
if (!P->getType().getTypePtr()->isBlockPointerType() ||
!IsBaseExprStatement) {
- Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
- CurContext);
+ Result R = Result(P, Results.getBasePriority(P), nullptr);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
return;
}
@@ -3850,8 +4023,10 @@ static void AddObjCProperties(
findTypeLocationForBlockDecl(P->getTypeSourceInfo(), BlockLoc,
BlockProtoLoc);
if (!BlockLoc) {
- Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
- CurContext);
+ Result R = Result(P, Results.getBasePriority(P), nullptr);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
return;
}
@@ -3862,9 +4037,10 @@ static void AddObjCProperties(
AddObjCBlockCall(Container->getASTContext(),
getCompletionPrintingPolicy(Results.getSema()), Builder, P,
BlockLoc, BlockProtoLoc);
- Results.MaybeAddResult(
- Result(Builder.TakeString(), P, Results.getBasePriority(P)),
- CurContext);
+ Result R = Result(Builder.TakeString(), P, Results.getBasePriority(P));
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
// Provide additional block setter completion iff the base expression is a
// statement and the block property is mutable.
@@ -3890,13 +4066,15 @@ static void AddObjCProperties(
// otherwise the setter completion should show up before the default
// property completion, as we normally want to use the result of the
// call.
- Results.MaybeAddResult(
+ Result R =
Result(Builder.TakeString(), P,
Results.getBasePriority(P) +
(BlockLoc.getTypePtr()->getReturnType()->isVoidType()
? CCD_BlockPropertySetter
- : -CCD_BlockPropertySetter)),
- CurContext);
+ : -CCD_BlockPropertySetter));
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
}
};
@@ -3924,10 +4102,11 @@ static void AddObjCProperties(
AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(), Builder);
Builder.AddTypedTextChunk(
Results.getAllocator().CopyString(Name->getName()));
- Results.MaybeAddResult(
- Result(Builder.TakeString(), M,
- CCP_MemberDeclaration + CCD_MethodAsProperty),
- CurContext);
+ Result R = Result(Builder.TakeString(), M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty);
+ if (!InOriginalClass)
+ setInBaseClass(R);
+ Results.MaybeAddResult(R, CurContext);
};
if (IsClassProperty) {
@@ -3953,42 +4132,47 @@ static void AddObjCProperties(
for (auto *P : Protocol->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
- } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
+ } else if (ObjCInterfaceDecl *IFace =
+ dyn_cast<ObjCInterfaceDecl>(Container)) {
if (AllowCategories) {
// Look through categories.
for (auto *Cat : IFace->known_categories())
AddObjCProperties(CCContext, Cat, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ InOriginalClass);
}
// Look through protocols.
for (auto *I : IFace->all_referenced_protocols())
AddObjCProperties(CCContext, I, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
// Look in the superclass.
if (IFace->getSuperClass())
AddObjCProperties(CCContext, IFace->getSuperClass(), AllowCategories,
AllowNullaryMethods, CurContext, AddedProperties,
- Results, IsBaseExprStatement, IsClassProperty);
- } else if (const ObjCCategoryDecl *Category
- = dyn_cast<ObjCCategoryDecl>(Container)) {
+ Results, IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
+ } else if (const auto *Category =
+ dyn_cast<ObjCCategoryDecl>(Container)) {
// Look through protocols.
for (auto *P : Category->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
CurContext, AddedProperties, Results,
- IsBaseExprStatement, IsClassProperty);
+ IsBaseExprStatement, IsClassProperty,
+ /*InOriginalClass*/ false);
}
}
-static void AddRecordMembersCompletionResults(Sema &SemaRef,
- ResultBuilder &Results, Scope *S,
- QualType BaseType,
- RecordDecl *RD,
- Optional<FixItHint> AccessOpFixIt) {
+static void
+AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
+ Scope *S, QualType BaseType, RecordDecl *RD,
+ Optional<FixItHint> AccessOpFixIt) {
// Indicate that we are performing a member access, and the cv-qualifiers
// for the base object type.
Results.setObjectTypeQualifiers(BaseType.getQualifiers());
@@ -3997,8 +4181,8 @@ static void AddRecordMembersCompletionResults(Sema &SemaRef,
Results.allowNestedNameSpecifiers();
std::vector<FixItHint> FixIts;
if (AccessOpFixIt)
- FixIts.emplace_back(AccessOpFixIt.getValue());
- CodeCompletionDeclConsumer Consumer(Results, SemaRef.CurContext, std::move(FixIts));
+ FixIts.emplace_back(AccessOpFixIt.getValue());
+ CodeCompletionDeclConsumer Consumer(Results, RD, BaseType, std::move(FixIts));
SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
SemaRef.CodeCompleter->includeGlobals(),
/*IncludeDependentBases=*/true,
@@ -4039,7 +4223,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
enum CodeCompletionContext::Kind contextKind;
if (IsArrow) {
- if (const PointerType *Ptr = ConvertedBaseType->getAs<PointerType>())
+ if (const auto *Ptr = ConvertedBaseType->getAs<PointerType>())
ConvertedBaseType = Ptr->getPointeeType();
}
@@ -4059,7 +4243,8 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
CodeCompleter->getCodeCompletionTUInfo(), CCContext,
&ResultBuilder::IsMember);
- auto DoCompletion = [&](Expr *Base, bool IsArrow, Optional<FixItHint> AccessOpFixIt) -> bool {
+ auto DoCompletion = [&](Expr *Base, bool IsArrow,
+ Optional<FixItHint> AccessOpFixIt) -> bool {
if (!Base)
return false;
@@ -4113,7 +4298,8 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
for (auto *I : BaseType->getAs<ObjCObjectPointerType>()->quals())
AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
CurContext, AddedProperties, Results,
- IsBaseExprStatement);
+ IsBaseExprStatement, /*IsClassProperty*/ false,
+ /*InOriginalClass*/ false);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
// Objective-C instance variable access.
@@ -4126,7 +4312,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Add all ivars from this class and its superclasses.
if (Class) {
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, Class, BaseType);
Results.setFilter(&ResultBuilder::IsObjCIvar);
LookupVisibleDecls(
Class, LookupMemberName, Consumer, CodeCompleter->includeGlobals(),
@@ -4188,8 +4374,8 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
return;
ResultBuilder::LookupFilter Filter = nullptr;
- enum CodeCompletionContext::Kind ContextKind
- = CodeCompletionContext::CCC_Other;
+ enum CodeCompletionContext::Kind ContextKind =
+ CodeCompletionContext::CCC_Other;
switch ((DeclSpec::TST)TagSpec) {
case DeclSpec::TST_enum:
Filter = &ResultBuilder::IsEnum;
@@ -4231,7 +4417,7 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
}
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
}
static void AddTypeQualifierResults(DeclSpec &DS, ResultBuilder &Results,
@@ -4255,8 +4441,7 @@ void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
Results.EnterNewScope();
AddTypeQualifierResults(DS, Results, LangOpts);
Results.ExitScope();
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -4291,6 +4476,9 @@ void Sema::CodeCompleteCase(Scope *S) {
return;
SwitchStmt *Switch = getCurFunction()->SwitchStack.back().getPointer();
+ // Condition expression might be invalid, do not continue in this case.
+ if (!Switch->getCond())
+ return;
QualType type = Switch->getCond()->IgnoreImplicit()->getType();
if (!type->isEnumeralType()) {
CodeCompleteExpressionData Data(type);
@@ -4318,9 +4506,9 @@ void Sema::CodeCompleteCase(Scope *S) {
continue;
Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
- if (EnumConstantDecl *Enumerator
- = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(CaseVal))
+ if (auto *Enumerator =
+ dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
// We look into the AST of the case statement to determine which
// enumerator was named. Alternatively, we could compute the value of
// the integral constant expression, then compare it against the
@@ -4366,7 +4554,7 @@ void Sema::CodeCompleteCase(Scope *S) {
Results.ExitScope();
if (CodeCompleter->includeMacros()) {
- AddMacroResults(PP, Results, false);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
}
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
@@ -4385,10 +4573,9 @@ static bool anyNullArguments(ArrayRef<Expr *> Args) {
typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
-static void mergeCandidatesWithResults(Sema &SemaRef,
- SmallVectorImpl<ResultCandidate> &Results,
- OverloadCandidateSet &CandidateSet,
- SourceLocation Loc) {
+static void mergeCandidatesWithResults(
+ Sema &SemaRef, SmallVectorImpl<ResultCandidate> &Results,
+ OverloadCandidateSet &CandidateSet, SourceLocation Loc) {
if (!CandidateSet.empty()) {
// Sort the overload candidate set by placing the best overloads first.
std::stable_sort(
@@ -4399,7 +4586,7 @@ static void mergeCandidatesWithResults(Sema &SemaRef,
});
// Add the remaining viable overload candidates as code-completion results.
- for (auto &Candidate : CandidateSet) {
+ for (OverloadCandidate &Candidate : CandidateSet) {
if (Candidate.Function && Candidate.Function->isDeleted())
continue;
if (Candidate.Viable)
@@ -4411,22 +4598,21 @@ static void mergeCandidatesWithResults(Sema &SemaRef,
/// Get the type of the Nth parameter from a given set of overload
/// candidates.
static QualType getParamType(Sema &SemaRef,
- ArrayRef<ResultCandidate> Candidates,
- unsigned N) {
+ ArrayRef<ResultCandidate> Candidates, unsigned N) {
// Given the overloads 'Candidates' for a function call matching all arguments
// up to N, return the type of the Nth parameter if it is the same for all
// overload candidates.
QualType ParamType;
for (auto &Candidate : Candidates) {
- if (auto FType = Candidate.getFunctionType())
- if (auto Proto = dyn_cast<FunctionProtoType>(FType))
+ if (const auto *FType = Candidate.getFunctionType())
+ if (const auto *Proto = dyn_cast<FunctionProtoType>(FType))
if (N < Proto->getNumParams()) {
if (ParamType.isNull())
ParamType = Proto->getParamType(N);
else if (!SemaRef.Context.hasSameUnqualifiedType(
- ParamType.getNonReferenceType(),
- Proto->getParamType(N).getNonReferenceType()))
+ ParamType.getNonReferenceType(),
+ Proto->getParamType(N).getNonReferenceType()))
// Otherwise return a default-constructed QualType.
return QualType();
}
@@ -4435,41 +4621,28 @@ static QualType getParamType(Sema &SemaRef,
return ParamType;
}
-static void CodeCompleteOverloadResults(Sema &SemaRef, Scope *S,
- MutableArrayRef<ResultCandidate> Candidates,
- unsigned CurrentArg,
- bool CompleteExpressionWithCurrentArg = true) {
- QualType ParamType;
- if (CompleteExpressionWithCurrentArg)
- ParamType = getParamType(SemaRef, Candidates, CurrentArg);
-
- if (ParamType.isNull())
- SemaRef.CodeCompleteOrdinaryName(S, Sema::PCC_Expression);
- else
- SemaRef.CodeCompleteExpression(S, ParamType);
-
- if (!Candidates.empty())
- SemaRef.CodeCompleter->ProcessOverloadCandidates(SemaRef, CurrentArg,
- Candidates.data(),
- Candidates.size());
+static QualType
+ProduceSignatureHelp(Sema &SemaRef, Scope *S,
+ MutableArrayRef<ResultCandidate> Candidates,
+ unsigned CurrentArg, SourceLocation OpenParLoc) {
+ if (Candidates.empty())
+ return QualType();
+ SemaRef.CodeCompleter->ProcessOverloadCandidates(
+ SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc);
+ return getParamType(SemaRef, Candidates, CurrentArg);
}
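A minimal standalone sketch (hypothetical names, no Clang APIs) of the rule getParamType applies above: report the current parameter's type only when every candidate that has such a parameter agrees on it.

#include <optional>
#include <string>
#include <vector>

// Each overload candidate is modeled as the list of its parameter type names.
using Candidate = std::vector<std::string>;

// Return the type of parameter N if all candidates that have an N-th
// parameter agree on it; otherwise return no value (no signature-help hint).
std::optional<std::string> commonParamType(const std::vector<Candidate> &Cands,
                                           unsigned N) {
  std::optional<std::string> Common;
  for (const Candidate &C : Cands) {
    if (N >= C.size())
      continue; // candidate has too few parameters: it casts no vote
    if (!Common)
      Common = C[N];
    else if (*Common != C[N])
      return std::nullopt; // candidates disagree on the N-th parameter
  }
  return Common;
}

// commonParamType({{"int", "double"}, {"int", "float"}}, 0) -> "int"
// commonParamType({{"int", "double"}, {"int", "float"}}, 1) -> no value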
-void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
+QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
+ ArrayRef<Expr *> Args,
+ SourceLocation OpenParLoc) {
if (!CodeCompleter)
- return;
-
- // When we're code-completing for a call, we fall back to ordinary
- // name code-completion whenever we can't produce specific
- // results. We may want to revisit this strategy in the future,
- // e.g., by merging the two kinds of results.
+ return QualType();
// FIXME: Provide support for variadic template functions.
-
// Ignore type-dependent call expressions entirely.
if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
Expr::hasAnyTypeDependentArguments(Args)) {
- CodeCompleteOrdinaryName(S, PCC_Expression);
- return;
+ return QualType();
}
// Build an overload candidate set based on the functions we find.
@@ -4498,13 +4671,12 @@ void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
const bool FirstArgumentIsBase = !UME->isImplicitAccess() && UME->getBase();
AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
/*SuppressUsedConversions=*/false,
- /*PartialOverloading=*/true,
- FirstArgumentIsBase);
+ /*PartialOverloading=*/true, FirstArgumentIsBase);
} else {
FunctionDecl *FD = nullptr;
- if (auto MCE = dyn_cast<MemberExpr>(NakedFn))
+ if (auto *MCE = dyn_cast<MemberExpr>(NakedFn))
FD = dyn_cast<FunctionDecl>(MCE->getMemberDecl());
- else if (auto DRE = dyn_cast<DeclRefExpr>(NakedFn))
+ else if (auto *DRE = dyn_cast<DeclRefExpr>(NakedFn))
FD = dyn_cast<FunctionDecl>(DRE->getDecl());
if (FD) { // We check whether it's a resolved function declaration.
if (!getLangOpts().CPlusPlus ||
@@ -4521,8 +4693,8 @@ void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
// call operator, so we check if it does and add them as candidates.
   // A complete type is needed to look up member function call operators.
if (isCompleteType(Loc, NakedFn->getType())) {
- DeclarationName OpName = Context.DeclarationNames
- .getCXXOperatorName(OO_Call);
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Call);
LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
LookupQualifiedName(R, DC);
R.suppressDiagnostics();
@@ -4542,7 +4714,7 @@ void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
if (auto FP = T->getAs<FunctionProtoType>()) {
if (!TooManyArguments(FP->getNumParams(), Args.size(),
- /*PartialOverloading=*/true) ||
+ /*PartialOverloading=*/true) ||
FP->isVariadic())
Results.push_back(ResultCandidate(FP));
} else if (auto FT = T->getAs<FunctionType>())
@@ -4550,49 +4722,67 @@ void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
Results.push_back(ResultCandidate(FT));
}
}
-
mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
- CodeCompleteOverloadResults(*this, S, Results, Args.size(),
- !CandidateSet.empty());
+ QualType ParamType =
+ ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+ return !CandidateSet.empty() ? ParamType : QualType();
}
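Illustrative target-code example (not part of the patch; '^' marks the completion point) for the renamed call-site entry point: the overload candidates are reported, and the type the candidates agree on for the current argument is returned as the preferred type.

void Log(const char *Msg) {}
void Log(const char *Msg, int Severity) {}

void Run() {
  // Signature help at '^' lists both Log overloads; since they agree that
  // parameter 0 is 'const char *', that type is also returned as the
  // preferred type for the argument being typed.
  Log(/*^*/ "boot");
}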
-void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
- ArrayRef<Expr *> Args) {
+QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
+ SourceLocation Loc,
+ ArrayRef<Expr *> Args,
+ SourceLocation OpenParLoc) {
if (!CodeCompleter)
- return;
+ return QualType();
   // A complete type is needed to look up constructors.
CXXRecordDecl *RD =
isCompleteType(Loc, Type) ? Type->getAsCXXRecordDecl() : nullptr;
- if (!RD) {
- CodeCompleteExpression(S, Type);
- return;
- }
+ if (!RD)
+ return Type;
// FIXME: Provide support for member initializers.
// FIXME: Provide support for variadic template constructors.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- for (auto C : LookupConstructors(RD)) {
- if (auto FD = dyn_cast<FunctionDecl>(C)) {
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()),
- Args, CandidateSet,
+ for (NamedDecl *C : LookupConstructors(RD)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(C)) {
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
+ CandidateSet,
/*SuppressUsedConversions=*/false,
/*PartialOverloading=*/true);
- } else if (auto FTD = dyn_cast<FunctionTemplateDecl>(C)) {
- AddTemplateOverloadCandidate(FTD,
- DeclAccessPair::make(FTD, C->getAccess()),
- /*ExplicitTemplateArgs=*/nullptr,
- Args, CandidateSet,
- /*SuppressUsedConversions=*/false,
- /*PartialOverloading=*/true);
+ } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, C->getAccess()),
+ /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
+ /*SuppressUsedConversions=*/false,
+ /*PartialOverloading=*/true);
}
}
SmallVector<ResultCandidate, 8> Results;
mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
- CodeCompleteOverloadResults(*this, S, Results, Args.size());
+ return ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+}
+
+QualType Sema::ProduceCtorInitMemberSignatureHelp(
+ Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
+ ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc) {
+ if (!CodeCompleter)
+ return QualType();
+
+ CXXConstructorDecl *Constructor =
+ dyn_cast<CXXConstructorDecl>(ConstructorDecl);
+ if (!Constructor)
+ return QualType();
+ // FIXME: Add support for Base class constructors as well.
+ if (ValueDecl *MemberDecl = tryLookupCtorInitMemberDecl(
+ Constructor->getParent(), SS, TemplateTypeTy, II))
+ return ProduceConstructorSignatureHelp(getCurScope(), MemberDecl->getType(),
+ MemberDecl->getLocation(), ArgExprs,
+ OpenParLoc);
+ return QualType();
}
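Illustrative target-code example (not part of the patch; '^' marks the completion point) for the new member-initializer signature help: inside the parentheses of a member initializer, the member type's constructors become the overload candidates.

#include <vector>

struct Widget {
  std::vector<int> Items;
  // Signature help at '^' surfaces the std::vector<int> constructors, because
  // the member initializer is routed through constructor signature help.
  Widget() : Items(/*^*/ 3, 0) {}
};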
void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
@@ -4602,7 +4792,12 @@ void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
return;
}
- CodeCompleteExpression(S, VD->getType());
+ CodeCompleteExpressionData Data;
+ Data.PreferredType = VD->getType();
+ // Ignore VD to avoid completing the variable itself, e.g. in 'int foo = ^'.
+ Data.IgnoreDecls.push_back(VD);
+
+ CodeCompleteExpression(S, Data);
}
void Sema::CodeCompleteReturn(Scope *S) {
@@ -4610,9 +4805,9 @@ void Sema::CodeCompleteReturn(Scope *S) {
if (isa<BlockDecl>(CurContext)) {
if (BlockScopeInfo *BSI = getCurBlock())
ResultType = BSI->ReturnType;
- } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(CurContext))
ResultType = Function->getReturnType();
- else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
+ else if (const auto *Method = dyn_cast<ObjCMethodDecl>(CurContext))
ResultType = Method->getReturnType();
if (ResultType.isNull())
@@ -4676,21 +4871,99 @@ void Sema::CodeCompleteAfterIf(Scope *S) {
AddPrettyFunctionResults(getLangOpts(), Results);
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, false);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
- Results.data(),Results.size());
+ Results.data(), Results.size());
+}
+
+static QualType getPreferredTypeOfBinaryRHS(Sema &S, Expr *LHS,
+ tok::TokenKind Op) {
+ if (!LHS)
+ return QualType();
+
+ QualType LHSType = LHS->getType();
+ if (LHSType->isPointerType()) {
+ if (Op == tok::plus || Op == tok::plusequal || Op == tok::minusequal)
+ return S.getASTContext().getPointerDiffType();
+ // Pointer difference is more common than subtracting an int from a pointer.
+ if (Op == tok::minus)
+ return LHSType;
+ }
+
+ switch (Op) {
+ // No way to infer the type of RHS from LHS.
+ case tok::comma:
+ return QualType();
+ // Prefer the type of the left operand for all of these.
+ // Arithmetic operations.
+ case tok::plus:
+ case tok::plusequal:
+ case tok::minus:
+ case tok::minusequal:
+ case tok::percent:
+ case tok::percentequal:
+ case tok::slash:
+ case tok::slashequal:
+ case tok::star:
+ case tok::starequal:
+ // Assignment.
+ case tok::equal:
+ // Comparison operators.
+ case tok::equalequal:
+ case tok::exclaimequal:
+ case tok::less:
+ case tok::lessequal:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::spaceship:
+ return LHS->getType();
+ // Binary shifts are often overloaded, so don't try to guess those.
+ case tok::greatergreater:
+ case tok::greatergreaterequal:
+ case tok::lessless:
+ case tok::lesslessequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return S.getASTContext().IntTy;
+ return QualType();
+ // Logical operators, assume we want bool.
+ case tok::ampamp:
+ case tok::pipepipe:
+ case tok::caretcaret:
+ return S.getASTContext().BoolTy;
+ // Operators often used for bit manipulation are typically used with the type
+ // of the left argument.
+ case tok::pipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal:
+ case tok::amp:
+ case tok::ampequal:
+ if (LHSType->isIntegralOrEnumerationType())
+ return LHSType;
+ return QualType();
+ // RHS should be a pointer to a member of the 'LHS' type, but we can't give
+ // any particular type here.
+ case tok::periodstar:
+ case tok::arrowstar:
+ return QualType();
+ default:
+ // FIXME(ibiryukov): handle the missing op, re-add the assertion.
+ // assert(false && "unhandled binary op");
+ return QualType();
+ }
}
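A compressed standalone sketch (hypothetical Ty/BinOp/preferredRHS names, covering only a slice of the operator table) of the heuristic getPreferredTypeOfBinaryRHS encodes: prefer the left operand's type for arithmetic, assignment and comparison, bool for logical operators, and the pointer-difference type when adding to a pointer.

#include <string>

enum class BinOp { Plus, Minus, Star, EqualEqual, AmpAmp, LessLess };

struct Ty {
  std::string Name;
  bool IsPointer = false;
  bool IsIntegral = false;
};

// Guess the preferred type of the right-hand side from the operator and the
// left-hand side's type; an empty Name means "no preference".
Ty preferredRHS(BinOp Op, const Ty &LHS) {
  if (LHS.IsPointer) {
    if (Op == BinOp::Plus)
      return {"ptrdiff_t", false, true}; // ptr + <integer offset>
    if (Op == BinOp::Minus)
      return LHS; // ptr - ptr is more common than ptr - int
  }
  switch (Op) {
  case BinOp::Plus:
  case BinOp::Minus:
  case BinOp::Star:
  case BinOp::EqualEqual:
    return LHS; // same type as the left operand
  case BinOp::AmpAmp:
    return {"bool", false, false}; // logical operators want bool
  case BinOp::LessLess:
    // Shifts of integers usually take an int amount; anything else is
    // probably an overloaded stream-style operator, so do not guess.
    return LHS.IsIntegral ? Ty{"int", false, true} : Ty{};
  }
  return {}; // unreachable with the operators above
}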
-void Sema::CodeCompleteAssignmentRHS(Scope *S, Expr *LHS) {
- if (LHS)
- CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType());
+void Sema::CodeCompleteBinaryRHS(Scope *S, Expr *LHS, tok::TokenKind Op) {
+ auto PreferredType = getPreferredTypeOfBinaryRHS(*this, LHS, Op);
+ if (!PreferredType.isNull())
+ CodeCompleteExpression(S, PreferredType);
else
CodeCompleteOrdinaryName(S, PCC_Expression);
}
void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
- bool EnteringContext) {
+ bool EnteringContext, QualType BaseType) {
if (SS.isEmpty() || !CodeCompleter)
return;
@@ -4699,7 +4972,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// it can be useful for global code completion which have information about
// contexts/symbols that are not in the AST.
if (SS.isInvalid()) {
- CodeCompletionContext CC(CodeCompletionContext::CCC_Name);
+ CodeCompletionContext CC(CodeCompletionContext::CCC_Symbol);
CC.setCXXScopeSpecifier(SS);
HandleCodeCompleteResults(this, CodeCompleter, CC, nullptr, 0);
return;
@@ -4717,7 +4990,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_Name);
+ CodeCompletionContext::CCC_Symbol);
Results.EnterNewScope();
// The "template" keyword can follow "::" in the grammar, but only
@@ -4737,7 +5010,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
if (CodeCompleter->includeNamespaceLevelDecls() ||
(!Ctx->isNamespace() && !Ctx->isTranslationUnit())) {
- CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ CodeCompletionDeclConsumer Consumer(Results, Ctx, BaseType);
LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer,
/*IncludeGlobalScope=*/true,
/*IncludeDependentBases=*/true,
@@ -4757,7 +5030,10 @@ void Sema::CodeCompleteUsing(Scope *S) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_PotentiallyQualifiedName,
+                        // This can be either a using alias or a using
+                        // declaration; in the former we expect a new name,
+                        // in the latter an existing symbol.
+ CodeCompletionContext::CCC_SymbolOrNewName,
&ResultBuilder::IsNestedNameSpecifier);
Results.EnterNewScope();
@@ -4797,7 +5073,7 @@ void Sema::CodeCompleteUsingDirective(Scope *S) {
Results.data(), Results.size());
}
-void Sema::CodeCompleteNamespaceDecl(Scope *S) {
+void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!CodeCompleter)
return;
@@ -4805,14 +5081,14 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
if (!S->getParent())
Ctx = Context.getTranslationUnitDecl();
- bool SuppressedGlobalResults
- = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
+ bool SuppressedGlobalResults =
+ Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
SuppressedGlobalResults
- ? CodeCompletionContext::CCC_Namespace
- : CodeCompletionContext::CCC_Other,
+ ? CodeCompletionContext::CCC_Namespace
+ : CodeCompletionContext::CCC_Other,
&ResultBuilder::IsNamespace);
if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
@@ -4822,7 +5098,8 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
// definition of each namespace.
std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
for (DeclContext::specific_decl_iterator<NamespaceDecl>
- NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
+ NS(Ctx->decls_begin()),
+ NSEnd(Ctx->decls_end());
NS != NSEnd; ++NS)
OrigToLatest[NS->getOriginalNamespace()] = *NS;
@@ -4830,22 +5107,21 @@ void Sema::CodeCompleteNamespaceDecl(Scope *S) {
// namespace to the list of results.
Results.EnterNewScope();
for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
- NS = OrigToLatest.begin(),
- NSEnd = OrigToLatest.end();
+ NS = OrigToLatest.begin(),
+ NSEnd = OrigToLatest.end();
NS != NSEnd; ++NS)
- Results.AddResult(CodeCompletionResult(
- NS->second, Results.getBasePriority(NS->second),
- nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ CodeCompletionResult(NS->second, Results.getBasePriority(NS->second),
+ nullptr),
+ CurContext, nullptr, false);
Results.ExitScope();
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
-void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
+void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
if (!CodeCompleter)
return;
@@ -4858,9 +5134,8 @@ void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
CodeCompleter->includeGlobals(),
CodeCompleter->loadExternal());
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteOperatorName(Scope *S) {
@@ -4875,8 +5150,8 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
Results.EnterNewScope();
// Add the names of overloadable operators.
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- if (std::strcmp(Spelling, "?")) \
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ if (std::strcmp(Spelling, "?")) \
Results.AddResult(Result(Spelling));
#include "clang/Basic/OperatorKinds.def"
@@ -4896,20 +5171,19 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
}
void Sema::CodeCompleteConstructorInitializer(
- Decl *ConstructorD,
- ArrayRef <CXXCtorInitializer *> Initializers) {
+ Decl *ConstructorD, ArrayRef<CXXCtorInitializer *> Initializers) {
if (!ConstructorD)
return;
AdjustDeclIfTemplate(ConstructorD);
- CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
+ auto *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
if (!Constructor)
return;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_PotentiallyQualifiedName);
+ CodeCompletionContext::CCC_Symbol);
Results.EnterNewScope();
// Fill in any already-initialized fields or base classes.
@@ -4917,39 +5191,96 @@ void Sema::CodeCompleteConstructorInitializer(
llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
for (unsigned I = 0, E = Initializers.size(); I != E; ++I) {
if (Initializers[I]->isBaseInitializer())
- InitializedBases.insert(
- Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
+ InitializedBases.insert(Context.getCanonicalType(
+ QualType(Initializers[I]->getBaseClass(), 0)));
else
- InitializedFields.insert(cast<FieldDecl>(
- Initializers[I]->getAnyMember()));
+ InitializedFields.insert(
+ cast<FieldDecl>(Initializers[I]->getAnyMember()));
}
// Add completions for base classes.
- CodeCompletionBuilder Builder(Results.getAllocator(),
- Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
bool SawLastInitializer = Initializers.empty();
CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ auto GenerateCCS = [&](const NamedDecl *ND, const char *Name) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Name);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (const auto *Function = dyn_cast<FunctionDecl>(ND))
+ AddFunctionParameterChunks(PP, Policy, Function, Builder);
+ else if (const auto *FunTemplDecl = dyn_cast<FunctionTemplateDecl>(ND))
+ AddFunctionParameterChunks(PP, Policy, FunTemplDecl->getTemplatedDecl(),
+ Builder);
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ return Builder.TakeString();
+ };
+ auto AddDefaultCtorInit = [&](const char *Name, const char *Type,
+ const NamedDecl *ND) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Name);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk(Type);
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ if (ND) {
+ auto CCR = CodeCompletionResult(
+ Builder.TakeString(), ND,
+ SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration);
+ if (isa<FieldDecl>(ND))
+ CCR.CursorKind = CXCursor_MemberRef;
+ return Results.AddResult(CCR);
+ }
+ return Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(),
+ SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration));
+ };
+ auto AddCtorsWithName = [&](const CXXRecordDecl *RD, unsigned int Priority,
+ const char *Name, const FieldDecl *FD) {
+ if (!RD)
+ return AddDefaultCtorInit(Name,
+ FD ? Results.getAllocator().CopyString(
+ FD->getType().getAsString(Policy))
+ : Name,
+ FD);
+ auto Ctors = getConstructors(Context, RD);
+ if (Ctors.begin() == Ctors.end())
+ return AddDefaultCtorInit(Name, Name, RD);
+ for (const NamedDecl *Ctor : Ctors) {
+ auto CCR = CodeCompletionResult(GenerateCCS(Ctor, Name), RD, Priority);
+ CCR.CursorKind = getCursorKindForDecl(Ctor);
+ Results.AddResult(CCR);
+ }
+ };
+ auto AddBase = [&](const CXXBaseSpecifier &Base) {
+ const char *BaseName =
+ Results.getAllocator().CopyString(Base.getType().getAsString(Policy));
+ const auto *RD = Base.getType()->getAsCXXRecordDecl();
+ AddCtorsWithName(
+ RD, SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration,
+ BaseName, nullptr);
+ };
+ auto AddField = [&](const FieldDecl *FD) {
+ const char *FieldName =
+ Results.getAllocator().CopyString(FD->getIdentifier()->getName());
+ const CXXRecordDecl *RD = FD->getType()->getAsCXXRecordDecl();
+ AddCtorsWithName(
+ RD, SawLastInitializer ? CCP_NextInitializer : CCP_MemberDeclaration,
+ FieldName, FD);
+ };
+
for (const auto &Base : ClassDecl->bases()) {
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(Base.getType(),
- QualType(Initializers.back()->getBaseClass(), 0));
+ SawLastInitializer =
+ !Initializers.empty() && Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(
+ Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
- Builder.AddTypedTextChunk(
- Results.getAllocator().CopyString(
- Base.getType().getAsString(Policy)));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration));
+ AddBase(Base);
SawLastInitializer = false;
}
@@ -4957,23 +5288,14 @@ void Sema::CodeCompleteConstructorInitializer(
for (const auto &Base : ClassDecl->vbases()) {
if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isBaseInitializer() &&
- Context.hasSameUnqualifiedType(Base.getType(),
- QualType(Initializers.back()->getBaseClass(), 0));
+ SawLastInitializer =
+ !Initializers.empty() && Initializers.back()->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(
+ Base.getType(), QualType(Initializers.back()->getBaseClass(), 0));
continue;
}
- Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(
- Base.getType().getAsString(Policy)));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration));
+ AddBase(Base);
SawLastInitializer = false;
}
@@ -4981,27 +5303,16 @@ void Sema::CodeCompleteConstructorInitializer(
for (auto *Field : ClassDecl->fields()) {
if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
.second) {
- SawLastInitializer
- = !Initializers.empty() &&
- Initializers.back()->isAnyMemberInitializer() &&
- Initializers.back()->getAnyMember() == Field;
+ SawLastInitializer = !Initializers.empty() &&
+ Initializers.back()->isAnyMemberInitializer() &&
+ Initializers.back()->getAnyMember() == Field;
continue;
}
if (!Field->getDeclName())
continue;
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Field->getIdentifier()->getName()));
- Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddPlaceholderChunk("args");
- Builder.AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- SawLastInitializer? CCP_NextInitializer
- : CCP_MemberDeclaration,
- CXCursor_MemberRef,
- CXAvailability_Available,
- Field));
+ AddField(Field);
SawLastInitializer = false;
}
Results.ExitScope();
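Illustrative target-code example (not part of the patch) of the reworked initializer-list completions above: bases and class-type members are now expanded once per visible constructor instead of a single generic "(args)" placeholder.

struct Base {
  Base(int N) {}
  Base(const char *Name, int N) {}
};

struct Derived : Base {
  int Counter;
  // Completing after the ':' is expected to offer, roughly:
  //   Base(int N)
  //   Base(const char *Name, int N)
  //   Counter(int)
  // rather than the old "Base(args)" / "Counter(args)" patterns.
  Derived() : /*^*/ Base(0), Counter(0) {}
};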
@@ -5042,9 +5353,7 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
for (; S && !isNamespaceScope(S); S = S->getParent()) {
for (const auto *D : S->decls()) {
const auto *Var = dyn_cast<VarDecl>(D);
- if (!Var ||
- !Var->hasLocalStorage() ||
- Var->hasAttr<BlocksAttr>())
+ if (!Var || !Var->hasLocalStorage() || Var->hasAttr<BlocksAttr>())
continue;
if (Known.insert(Var->getIdentifier()).second)
@@ -5065,26 +5374,25 @@ void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
/// Macro that optionally prepends an "@" to the string literal passed in via
/// Keyword, depending on whether NeedAt is true or false.
-#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) ((NeedAt)? "@" Keyword : Keyword)
+#define OBJC_AT_KEYWORD_NAME(NeedAt, Keyword) ((NeedAt) ? "@" Keyword : Keyword)
static void AddObjCImplementationResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an implementation, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "end")));
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
- if (LangOpts.ObjC2) {
+ if (LangOpts.ObjC) {
// @dynamic
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"dynamic"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "dynamic"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
// @synthesize
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "synthesize"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("property");
Results.AddResult(Result(Builder.TakeString()));
@@ -5092,22 +5400,21 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
}
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an interface or protocol, we can end it.
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "end")));
- if (LangOpts.ObjC2) {
+ if (LangOpts.ObjC) {
// @property
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "property")));
// @required
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "required")));
// @optional
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "optional")));
}
}
@@ -5117,7 +5424,7 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
Results.getCodeCompletionTUInfo());
// @class name ;
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "class"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
@@ -5126,26 +5433,27 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
// @interface name
// FIXME: Could introduce the whole pattern, including superclasses and
// such.
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "interface"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
// @protocol name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "protocol"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("protocol");
Results.AddResult(Result(Builder.TakeString()));
// @implementation name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "implementation"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("class");
Results.AddResult(Result(Builder.TakeString()));
}
// @compatibility_alias name
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
+ Builder.AddTypedTextChunk(
+ OBJC_AT_KEYWORD_NAME(NeedAt, "compatibility_alias"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("alias");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -5188,7 +5496,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
Results.getSema().getLangOpts().ConstStrings)
EncodeType = "const char[]";
Builder.AddResultTypeChunk(EncodeType);
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"encode"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "encode"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5196,7 +5504,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @protocol ( protocol-name )
Builder.AddResultTypeChunk("Protocol *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "protocol"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("protocol-name");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5204,7 +5512,7 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @selector ( selector )
Builder.AddResultTypeChunk("SEL");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"selector"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "selector"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("selector");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -5212,21 +5520,21 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
// @"string"
Builder.AddResultTypeChunk("NSString *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"\""));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "\""));
Builder.AddPlaceholderChunk("string");
Builder.AddTextChunk("\"");
Results.AddResult(Result(Builder.TakeString()));
// @[objects, ...]
Builder.AddResultTypeChunk("NSArray *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"["));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "["));
Builder.AddPlaceholderChunk("objects, ...");
Builder.AddChunk(CodeCompletionString::CK_RightBracket);
Results.AddResult(Result(Builder.TakeString()));
// @{key : object, ...}
Builder.AddResultTypeChunk("NSDictionary *");
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "{"));
Builder.AddPlaceholderChunk("key");
Builder.AddChunk(CodeCompletionString::CK_Colon);
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -5250,7 +5558,7 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
if (Results.includeCodePatterns()) {
// @try { statements } @catch ( declaration ) { statements } @finally
// { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"try"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "try"));
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
@@ -5269,14 +5577,14 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
}
// @throw
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "throw"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// @synchronized ( expression ) { statements }
- Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "synchronized"));
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
@@ -5289,14 +5597,13 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
}
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
- ResultBuilder &Results,
- bool NeedAt) {
+ ResultBuilder &Results, bool NeedAt) {
typedef CodeCompletionResult Result;
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"private")));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"protected")));
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"public")));
- if (LangOpts.ObjC2)
- Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"package")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "private")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "protected")));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "public")));
+ if (LangOpts.ObjC)
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt, "package")));
}
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
@@ -5348,14 +5655,12 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
return true;
// Check for more than one of { assign, copy, retain, strong, weak }.
- unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
- ObjCDeclSpec::DQ_PR_unsafe_unretained |
- ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain |
- ObjCDeclSpec::DQ_PR_strong |
- ObjCDeclSpec::DQ_PR_weak);
- if (AssignCopyRetMask &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
+ unsigned AssignCopyRetMask =
+ Attributes &
+ (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_weak);
+ if (AssignCopyRetMask && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
@@ -5433,11 +5738,10 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
enum ObjCMethodKind {
   MK_Any, ///< Any kind of method, provided it meets other specified criteria.
MK_ZeroArgSelector, ///< Zero-argument (unary) selector.
- MK_OneArgSelector ///< One-argument selector.
+ MK_OneArgSelector ///< One-argument selector.
};
-static bool isAcceptableObjCSelector(Selector Sel,
- ObjCMethodKind WantKind,
+static bool isAcceptableObjCSelector(Selector Sel, ObjCMethodKind WantKind,
ArrayRef<IdentifierInfo *> SelIdents,
bool AllowSameLength = true) {
unsigned NumSelIdents = SelIdents.size();
@@ -5445,9 +5749,12 @@ static bool isAcceptableObjCSelector(Selector Sel,
return false;
switch (WantKind) {
- case MK_Any: break;
- case MK_ZeroArgSelector: return Sel.isUnarySelector();
- case MK_OneArgSelector: return Sel.getNumArgs() == 1;
+ case MK_Any:
+ break;
+ case MK_ZeroArgSelector:
+ return Sel.isUnarySelector();
+ case MK_OneArgSelector:
+ return Sel.getNumArgs() == 1;
}
if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
@@ -5468,11 +5775,9 @@ static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
AllowSameLength);
}
-namespace {
- /// A set of selectors, which is used to avoid introducing multiple
- /// completions with the same selector into the result set.
- typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
-}
+/// A set of selectors, which is used to avoid introducing multiple
+/// completions with the same selector into the result set.
+typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
/// Add all of the Objective-C methods in the given Objective-C
/// container to the set of results.
@@ -5505,7 +5810,7 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
Container = getContainerDef(Container);
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
IsRootClass = IsRootClass || (IFace && !IFace->getSuperClass());
- for (auto *M : Container->methods()) {
+ for (ObjCMethodDecl *M : Container->methods()) {
// The instance methods on the root class can be messaged via the
// metaclass.
if (M->isInstanceMethod() == WantInstanceMethods ||
@@ -5522,16 +5827,16 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
R.StartParameter = SelIdents.size();
R.AllParametersAreInformative = (WantKind != MK_Any);
if (!InOriginalClass)
- R.Priority += CCD_InBaseClass;
+ setInBaseClass(R);
Results.MaybeAddResult(R, CurContext);
}
}
// Visit the protocols of protocols.
- if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (const auto *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
if (Protocol->hasDefinition()) {
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -5544,19 +5849,19 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
return;
// Add methods in protocols.
- for (auto *I : IFace->protocols())
+ for (ObjCProtocolDecl *I : IFace->protocols())
AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents, CurContext,
Selectors, AllowSameLength, Results, false, IsRootClass);
// Add methods in categories.
- for (auto *CatDecl : IFace->known_categories()) {
+ for (ObjCCategoryDecl *CatDecl : IFace->known_categories()) {
AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
CurContext, Selectors, AllowSameLength, Results,
InOriginalClass, IsRootClass);
// Add the category's protocol methods.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = CatDecl->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -5584,13 +5889,12 @@ static void AddObjCMethods(ObjCContainerDecl *Container,
IsRootClass);
}
-
void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
// Try to find the interface where getters might live.
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
if (!Class) {
- if (ObjCCategoryDecl *Category
- = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ if (ObjCCategoryDecl *Category =
+ dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -5613,11 +5917,10 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
// Try to find the interface where setters might live.
- ObjCInterfaceDecl *Class
- = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
if (!Class) {
- if (ObjCCategoryDecl *Category
- = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ if (ObjCCategoryDecl *Category =
+ dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
Class = Category->getClassInterface();
if (!Class)
@@ -5631,8 +5934,8 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext,
- Selectors, /*AllowSameLength=*/true, Results);
+ AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext, Selectors,
+ /*AllowSameLength=*/true, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
@@ -5663,9 +5966,9 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
if ((DS.getObjCDeclQualifier() &
(ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
ObjCDeclSpec::DQ_Oneway)) == 0) {
- Results.AddResult("bycopy");
- Results.AddResult("byref");
- Results.AddResult("oneway");
+ Results.AddResult("bycopy");
+ Results.AddResult("byref");
+ Results.AddResult("oneway");
}
if ((DS.getObjCDeclQualifier() & ObjCDeclSpec::DQ_CSNullability) == 0) {
Results.AddResult("nonnull");
@@ -5710,7 +6013,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
CodeCompleter->loadExternal());
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, false);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
@@ -5721,7 +6024,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
/// common uses of Objective-C. This routine returns that class type,
/// or NULL if no better result could be determined.
static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
- ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
+ auto *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
if (!Msg)
return nullptr;
@@ -5741,8 +6044,8 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
ObjCInterfaceDecl *IFace = nullptr;
switch (Msg->getReceiverKind()) {
case ObjCMessageExpr::Class:
- if (const ObjCObjectType *ObjType
- = Msg->getClassReceiver()->getAs<ObjCObjectType>())
+ if (const ObjCObjectType *ObjType =
+ Msg->getClassReceiver()->getAs<ObjCObjectType>())
IFace = ObjType->getInterface();
break;
@@ -5764,27 +6067,27 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
ObjCInterfaceDecl *Super = IFace->getSuperClass();
if (Method->isInstanceMethod())
return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
- .Case("retain", IFace)
- .Case("strong", IFace)
- .Case("autorelease", IFace)
- .Case("copy", IFace)
- .Case("copyWithZone", IFace)
- .Case("mutableCopy", IFace)
- .Case("mutableCopyWithZone", IFace)
- .Case("awakeFromCoder", IFace)
- .Case("replacementObjectFromCoder", IFace)
+ .Case("retain", IFace)
+ .Case("strong", IFace)
+ .Case("autorelease", IFace)
+ .Case("copy", IFace)
+ .Case("copyWithZone", IFace)
+ .Case("mutableCopy", IFace)
+ .Case("mutableCopyWithZone", IFace)
+ .Case("awakeFromCoder", IFace)
+ .Case("replacementObjectFromCoder", IFace)
+ .Case("class", IFace)
+ .Case("classForCoder", IFace)
+ .Case("superclass", Super)
+ .Default(nullptr);
+
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("new", IFace)
+ .Case("alloc", IFace)
+ .Case("allocWithZone", IFace)
.Case("class", IFace)
- .Case("classForCoder", IFace)
.Case("superclass", Super)
.Default(nullptr);
-
- return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
- .Case("new", IFace)
- .Case("alloc", IFace)
- .Case("allocWithZone", IFace)
- .Case("class", IFace)
- .Case("superclass", Super)
- .Default(nullptr);
}
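// A minimal standalone sketch of the llvm::StringSwitch idiom used above:
// each .Case() compares the subject string, the first match wins, and
// .Default() supplies the fallback. The function name and return codes are
// illustrative placeholders, not taken from this file; only LLVM's ADT
// headers are assumed.
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

static int classifyReceiverSelector(llvm::StringRef Name) {
  return llvm::StringSwitch<int>(Name)
      .Case("retain", 1)     // result has the receiver's own class
      .Case("copy", 1)
      .Case("superclass", 2) // result has the superclass's type
      .Default(0);           // nothing known about the result type
}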
// Add a special completion for a message send to "super", which fills in the
@@ -5803,10 +6106,10 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
///
/// \returns the Objective-C method declaration that would be invoked by
/// this "super" completion. If NULL, no completion was added.
-static ObjCMethodDecl *AddSuperSendCompletion(
- Sema &S, bool NeedSuperKeyword,
- ArrayRef<IdentifierInfo *> SelIdents,
- ResultBuilder &Results) {
+static ObjCMethodDecl *
+AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
+ ArrayRef<IdentifierInfo *> SelIdents,
+ ResultBuilder &Results) {
ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
if (!CurMethod)
return nullptr;
@@ -5826,7 +6129,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
if (!SuperMethod) {
for (const auto *Cat : Class->known_categories()) {
if ((SuperMethod = Cat->getMethod(CurMethod->getSelector(),
- CurMethod->isInstanceMethod())))
+ CurMethod->isInstanceMethod())))
break;
}
}
@@ -5841,8 +6144,8 @@ static ObjCMethodDecl *AddSuperSendCompletion(
return nullptr;
for (ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin(),
- CurPEnd = CurMethod->param_end(),
- SuperP = SuperMethod->param_begin();
+ CurPEnd = CurMethod->param_end(),
+ SuperP = SuperMethod->param_begin();
CurP != CurPEnd; ++CurP, ++SuperP) {
// Make sure the parameter types are compatible.
if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
@@ -5860,8 +6163,7 @@ static ObjCMethodDecl *AddSuperSendCompletion(
// Give this completion a return type.
AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
- Results.getCompletionContext().getBaseType(),
- Builder);
+ Results.getCompletionContext().getBaseType(), Builder);
// If we need the "super" keyword, add it (plus some spacing).
if (NeedSuperKeyword) {
@@ -5872,11 +6174,11 @@ static ObjCMethodDecl *AddSuperSendCompletion(
Selector Sel = CurMethod->getSelector();
if (Sel.isUnarySelector()) {
if (NeedSuperKeyword)
- Builder.AddTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
else
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
} else {
ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
@@ -5885,20 +6187,17 @@ static ObjCMethodDecl *AddSuperSendCompletion(
if (I < SelIdents.size())
Builder.AddInformativeChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
else if (NeedSuperKeyword || I > SelIdents.size()) {
Builder.AddTextChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
- (*CurP)->getIdentifier()->getName()));
+ (*CurP)->getIdentifier()->getName()));
} else {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(
- Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
- (*CurP)->getIdentifier()->getName()));
+ (*CurP)->getIdentifier()->getName()));
}
}
}
@@ -5910,12 +6209,13 @@ static ObjCMethodDecl *AddSuperSendCompletion(
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
typedef CodeCompletionResult Result;
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext::CCC_ObjCMessageReceiver,
- getLangOpts().CPlusPlus11
- ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
- : &ResultBuilder::IsObjCMessageReceiver);
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCMessageReceiver,
+ getLangOpts().CPlusPlus11
+ ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
+ : &ResultBuilder::IsObjCMessageReceiver);
CodeCompletionDeclConsumer Consumer(Results, CurContext);
Results.EnterNewScope();
@@ -5939,10 +6239,9 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
Results.ExitScope();
if (CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, false);
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), false);
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
-
}
void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
@@ -5965,8 +6264,7 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
// send [super ...] is actually calling an instance method on the
// current object.
return CodeCompleteObjCInstanceMessage(S, nullptr, SelIdents,
- AtArgumentExpression,
- CDecl);
+ AtArgumentExpression, CDecl);
}
// Fall through to send to the superclass in CDecl.
@@ -5974,13 +6272,12 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
// "super" may be the name of a type or variable. Figure out which
// it is.
IdentifierInfo *Super = getSuperIdentifier();
- NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
- LookupOrdinaryName);
+ NamedDecl *ND = LookupSingleName(S, Super, SuperLoc, LookupOrdinaryName);
if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
// "super" names an interface. Use it.
} else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
- if (const ObjCObjectType *Iface
- = Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
+ if (const ObjCObjectType *Iface =
+ Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
CDecl = Iface->getInterface();
} else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
// "super" names an unresolved type; we can't be more specific.
@@ -5990,11 +6287,10 @@ void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
SourceLocation TemplateKWLoc;
UnqualifiedId id;
id.setIdentifier(Super, SuperLoc);
- ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
- false, false);
+ ExprResult SuperExpr =
+ ActOnIdExpression(S, SS, TemplateKWLoc, id, false, false);
return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
- SelIdents,
- AtArgumentExpression);
+ SelIdents, AtArgumentExpression);
}
// Fall through
@@ -6025,8 +6321,8 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
if (R.Priority <= BestPriority) {
const ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
if (NumSelIdents <= Method->param_size()) {
- QualType MyPreferredType = Method->parameters()[NumSelIdents - 1]
- ->getType();
+ QualType MyPreferredType =
+ Method->parameters()[NumSelIdents - 1]->getType();
if (R.Priority < BestPriority || PreferredType.isNull()) {
BestPriority = R.Priority;
PreferredType = MyPreferredType;
@@ -6045,8 +6341,7 @@ static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
- bool AtArgumentExpression,
- bool IsSuper,
+ bool AtArgumentExpression, bool IsSuper,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
ObjCInterfaceDecl *CDecl = nullptr;
@@ -6067,8 +6362,8 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
// If this is a send-to-super, try to add the special "super" send
// completion.
if (IsSuper) {
- if (ObjCMethodDecl *SuperMethod
- = AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
+ if (ObjCMethodDecl *SuperMethod =
+ AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
@@ -6079,9 +6374,8 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
VisitedSelectorSet Selectors;
if (CDecl)
- AddObjCMethods(CDecl, false, MK_Any, SelIdents,
- SemaRef.CurContext, Selectors, AtArgumentExpression,
- Results);
+ AddObjCMethods(CDecl, false, MK_Any, SelIdents, SemaRef.CurContext,
+ Selectors, AtArgumentExpression, Results);
else {
// We're messaging "id" as a type; provide all class/factory methods.
@@ -6100,11 +6394,10 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
}
for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
- MEnd = SemaRef.MethodPool.end();
+ MEnd = SemaRef.MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.second;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -6127,10 +6420,11 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
QualType T = this->GetTypeFromParser(Receiver);
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
- T, SelIdents));
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage, T,
+ SelIdents));
AddClassMessageCompletions(*this, S, Receiver, SelIdents,
AtArgumentExpression, IsSuper, Results);
@@ -6141,8 +6435,8 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
- SelIdents.size());
+ QualType PreferredType =
+ getPreferredArgumentTypeForMessageSend(Results, SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
else
@@ -6150,8 +6444,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
@@ -6171,10 +6464,11 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
RecExpr = Conv.get();
}
- QualType ReceiverType = RecExpr? RecExpr->getType()
- : Super? Context.getObjCObjectPointerType(
+ QualType ReceiverType = RecExpr
+ ? RecExpr->getType()
+ : Super ? Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(Super))
- : Context.getObjCIdType();
+ : Context.getObjCIdType();
// If we're messaging an expression with type "id" or "Class", check
// whether we know something special about the receiver that allows
@@ -6182,13 +6476,12 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
if (ReceiverType->isObjCClassType())
- return CodeCompleteObjCClassMessage(S,
- ParsedType::make(Context.getObjCInterfaceType(IFace)),
- SelIdents,
- AtArgumentExpression, Super);
+ return CodeCompleteObjCClassMessage(
+ S, ParsedType::make(Context.getObjCInterfaceType(IFace)), SelIdents,
+ AtArgumentExpression, Super);
- ReceiverType = Context.getObjCObjectPointerType(
- Context.getObjCInterfaceType(IFace));
+ ReceiverType =
+ Context.getObjCObjectPointerType(Context.getObjCInterfaceType(IFace));
}
} else if (RecExpr && getLangOpts().CPlusPlus) {
ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
@@ -6199,18 +6492,19 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
}
// Build the set of methods we can see.
- ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
- ReceiverType, SelIdents));
+ ResultBuilder Results(
+ *this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
+ ReceiverType, SelIdents));
Results.EnterNewScope();
// If this is a send-to-super, try to add the special "super" send
// completion.
if (Super) {
- if (ObjCMethodDecl *SuperMethod
- = AddSuperSendCompletion(*this, false, SelIdents, Results))
+ if (ObjCMethodDecl *SuperMethod =
+ AddSuperSendCompletion(*this, false, SelIdents, Results))
Results.Ignore(SuperMethod);
}
@@ -6229,30 +6523,29 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ReceiverType->isObjCQualifiedClassType()) {
if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
- AddObjCMethods(ClassDecl, false, MK_Any, SelIdents,
- CurContext, Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
}
}
// Handle messages to a qualified ID ("id<foo>").
- else if (const ObjCObjectPointerType *QualID
- = ReceiverType->getAsObjCQualifiedIdType()) {
+ else if (const ObjCObjectPointerType *QualID =
+ ReceiverType->getAsObjCQualifiedIdType()) {
// Search protocols for instance methods.
for (auto *I : QualID->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
- Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AtArgumentExpression, Results);
}
// Handle messages to a pointer to interface type.
- else if (const ObjCObjectPointerType *IFacePtr
- = ReceiverType->getAsObjCInterfacePointerType()) {
+ else if (const ObjCObjectPointerType *IFacePtr =
+ ReceiverType->getAsObjCInterfacePointerType()) {
// Search the class, its superclasses, etc., for instance methods.
AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
- CurContext, Selectors, AtArgumentExpression,
- Results);
+ CurContext, Selectors, AtArgumentExpression, Results);
// Search protocols for instance methods.
for (auto *I : IFacePtr->quals())
- AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
- Selectors, AtArgumentExpression, Results);
+ AddObjCMethods(I, true, MK_Any, SelIdents, CurContext, Selectors,
+ AtArgumentExpression, Results);
}
// Handle messages to "id".
else if (ReceiverType->isObjCIdType()) {
@@ -6276,8 +6569,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
MEnd = MethodPool.end();
M != MEnd; ++M) {
for (ObjCMethodList *MethList = &M->second.first;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -6294,15 +6586,14 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
}
Results.ExitScope();
-
// If we're actually at the argument expression (rather than prior to the
// selector), we're actually performing code completion for an expression.
// Determine whether we have a single, best method. If so, we can
// code-complete the expression using the corresponding parameter type as
// our preferred type, improving completion results.
if (AtArgumentExpression) {
- QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
- SelIdents.size());
+ QualType PreferredType =
+ getPreferredArgumentTypeForMessageSend(Results, SelIdents.size());
if (PreferredType.isNull())
CodeCompleteOrdinaryName(S, PCC_Expression);
else
@@ -6310,9 +6601,8 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
return;
}
- HandleCodeCompleteResults(this, CodeCompleter,
- Results.getCompletionContext(),
- Results.data(),Results.size());
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
}
void Sema::CodeCompleteObjCForCollection(Scope *S,
@@ -6336,8 +6626,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
// If we have an external source, load the entire class method
// pool from the AST file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
- I != N; ++I) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
+ ++I) {
Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || MethodPool.count(Sel))
continue;
@@ -6351,7 +6641,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
CodeCompletionContext::CCC_SelectorName);
Results.EnterNewScope();
for (GlobalMethodPool::iterator M = MethodPool.begin(),
- MEnd = MethodPool.end();
+ MEnd = MethodPool.end();
M != MEnd; ++M) {
Selector Sel = M->first;
@@ -6361,8 +6651,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (Sel.isUnarySelector()) {
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
Results.AddResult(Builder.TakeString());
continue;
}
@@ -6371,8 +6661,8 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
if (I == SelIdents.size()) {
if (!Accumulator.empty()) {
- Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
- Accumulator));
+ Builder.AddInformativeChunk(
+ Builder.getAllocator().CopyString(Accumulator));
Accumulator.clear();
}
}
@@ -6380,7 +6670,7 @@ void Sema::CodeCompleteObjCSelector(Scope *S,
Accumulator += Sel.getNameForSlot(I);
Accumulator += ':';
}
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString( Accumulator));
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(Accumulator));
Results.AddResult(Builder.TakeString());
}
Results.ExitScope();
@@ -6400,13 +6690,14 @@ static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
// Record any protocols we find.
if (const auto *Proto = dyn_cast<ObjCProtocolDecl>(D))
if (!OnlyForwardDeclarations || !Proto->hasDefinition())
- Results.AddResult(Result(Proto, Results.getBasePriority(Proto),nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Proto, Results.getBasePriority(Proto), nullptr), CurContext,
+ nullptr, false);
}
}
void Sema::CodeCompleteObjCProtocolReferences(
- ArrayRef<IdentifierLocPair> Protocols) {
+ ArrayRef<IdentifierLocPair> Protocols) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
@@ -6418,8 +6709,7 @@ void Sema::CodeCompleteObjCProtocolReferences(
// already seen.
// FIXME: This doesn't work when caching code-completion results.
for (const IdentifierLocPair &Pair : Protocols)
- if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first,
- Pair.second))
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Pair.first, Pair.second))
Results.Ignore(Protocol);
// Add all protocols.
@@ -6465,8 +6755,9 @@ static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
if (const auto *Class = dyn_cast<ObjCInterfaceDecl>(D))
if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
(!OnlyUnimplemented || !Class->getImplementation()))
- Results.AddResult(Result(Class, Results.getBasePriority(Class),nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Class, Results.getBasePriority(Class), nullptr), CurContext,
+ nullptr, false);
}
}
@@ -6496,8 +6787,8 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
Results.EnterNewScope();
// Make sure that we ignore the class we're currently defining.
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
if (CurClass && isa<ObjCInterfaceDecl>(CurClass))
Results.Ignore(CurClass);
@@ -6543,9 +6834,10 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
// Ignore any categories we find that have already been implemented by this
// interface.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
- if (ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)){
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (ObjCInterfaceDecl *Class =
+ dyn_cast_or_null<ObjCInterfaceDecl>(CurClass)) {
for (const auto *Cat : Class->visible_categories())
CategoryNames.insert(Cat->getIdentifier());
}
@@ -6556,9 +6848,9 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
for (const auto *D : TU->decls())
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(D))
if (CategoryNames.insert(Category->getIdentifier()).second)
- Results.AddResult(Result(Category, Results.getBasePriority(Category),
- nullptr),
- CurContext, nullptr, false);
+ Results.AddResult(
+ Result(Category, Results.getBasePriority(Category), nullptr),
+ CurContext, nullptr, false);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
@@ -6573,8 +6865,8 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
// Find the corresponding interface. If we couldn't find the interface, the
// program itself is ill-formed. However, we'll try to be helpful still by
// providing the list of all of the categories we know about.
- NamedDecl *CurClass
- = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ NamedDecl *CurClass =
+ LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
if (!Class)
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
@@ -6609,15 +6901,13 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
CodeCompletionContext CCContext(CodeCompletionContext::CCC_Other);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompleter->getCodeCompletionTUInfo(),
- CCContext);
+ CodeCompleter->getCodeCompletionTUInfo(), CCContext);
// Figure out where this @synthesize lives.
- ObjCContainerDecl *Container
- = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
- !isa<ObjCCategoryImplDecl>(Container)))
+ ObjCContainerDecl *Container =
+ dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
return;
// Ignore any properties that have already been implemented.
@@ -6629,8 +6919,8 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
// Add any properties that we find.
AddedPropertiesSet AddedProperties;
Results.EnterNewScope();
- if (ObjCImplementationDecl *ClassImpl
- = dyn_cast<ObjCImplementationDecl>(Container))
+ if (ObjCImplementationDecl *ClassImpl =
+ dyn_cast<ObjCImplementationDecl>(Container))
AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
/*AllowNullaryMethods=*/false, CurContext,
AddedProperties, Results);
@@ -6645,37 +6935,37 @@ void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
- IdentifierInfo *PropertyName) {
+void Sema::CodeCompleteObjCPropertySynthesizeIvar(
+ Scope *S, IdentifierInfo *PropertyName) {
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_Other);
// Figure out where this @synthesize lives.
- ObjCContainerDecl *Container
- = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
- if (!Container ||
- (!isa<ObjCImplementationDecl>(Container) &&
- !isa<ObjCCategoryImplDecl>(Container)))
+ ObjCContainerDecl *Container =
+ dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container || (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
return;
// Figure out which interface we're looking into.
ObjCInterfaceDecl *Class = nullptr;
- if (ObjCImplementationDecl *ClassImpl
- = dyn_cast<ObjCImplementationDecl>(Container))
+ if (ObjCImplementationDecl *ClassImpl =
+ dyn_cast<ObjCImplementationDecl>(Container))
Class = ClassImpl->getClassInterface();
else
- Class = cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl()
- ->getClassInterface();
+ Class = cast<ObjCCategoryImplDecl>(Container)
+ ->getCategoryDecl()
+ ->getClassInterface();
// Determine the type of the property we're synthesizing.
QualType PropertyType = Context.getObjCIdType();
if (Class) {
if (ObjCPropertyDecl *Property = Class->FindPropertyDeclaration(
PropertyName, ObjCPropertyQueryKind::OBJC_PR_query_instance)) {
- PropertyType
- = Property->getType().getNonReferenceType().getUnqualifiedType();
+ PropertyType =
+ Property->getType().getNonReferenceType().getUnqualifiedType();
// Give preference to ivars
Results.setPreferredType(PropertyType);
@@ -6690,7 +6980,7 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
NameWithPrefix += PropertyName->getName();
std::string NameWithSuffix = PropertyName->getName().str();
NameWithSuffix += '_';
- for(; Class; Class = Class->getSuperClass()) {
+ for (; Class; Class = Class->getSuperClass()) {
for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
Ivar = Ivar->getNextIvar()) {
Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
@@ -6706,8 +6996,8 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
// Reduce the priority of this result by one, to give it a slight
// advantage over other results whose names don't match so closely.
if (Results.size() &&
- Results.data()[Results.size() - 1].Kind
- == CodeCompletionResult::RK_Declaration &&
+ Results.data()[Results.size() - 1].Kind ==
+ CodeCompletionResult::RK_Declaration &&
Results.data()[Results.size() - 1].Declaration == Ivar)
Results.data()[Results.size() - 1].Priority--;
}
@@ -6721,14 +7011,14 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
- Priority,CXAvailability_Available);
+ Priority, CXAvailability_Available);
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
- Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
- Policy, Allocator));
+ Builder.AddResultTypeChunk(
+ GetCompletionTypeString(PropertyType, Context, Policy, Allocator));
Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
- Results.AddResult(Result(Builder.TakeString(), Priority,
- CXCursor_ObjCIvarDecl));
+ Results.AddResult(
+ Result(Builder.TakeString(), Priority, CXCursor_ObjCIvarDecl));
}
Results.ExitScope();
@@ -6739,8 +7029,9 @@ void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
// Mapping from selectors to the methods that implement that selector, along
// with the "in original class" flag.
-typedef llvm::DenseMap<
- Selector, llvm::PointerIntPair<ObjCMethodDecl *, 1, bool> > KnownMethodsMap;
+typedef llvm::DenseMap<Selector,
+ llvm::PointerIntPair<ObjCMethodDecl *, 1, bool>>
+ KnownMethodsMap;
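// A minimal standalone sketch of the KnownMethodsMap value layout: a
// PointerIntPair packs the method pointer and the "declared in the original
// class" flag into one pointer-sized value, and getPointer()/getInt() recover
// the two halves. int stands in for ObjCMethodDecl purely for illustration;
// only LLVM's ADT headers are assumed.
#include "llvm/ADT/PointerIntPair.h"

static llvm::PointerIntPair<int *, 1, bool> makeEntry(int *Method,
                                                      bool InOriginalClass) {
  return {Method, InOriginalClass};
}

static bool isInOriginalClass(llvm::PointerIntPair<int *, 1, bool> Entry) {
  return Entry.getPointer() != nullptr && Entry.getInt();
}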
/// Find all of the methods that reside in the given container
/// (and its superclasses, protocols, etc.) that meet the given
@@ -6760,8 +7051,8 @@ static void FindImplementableMethods(ASTContext &Context,
IFace = IFace->getDefinition();
Container = IFace;
- const ObjCList<ObjCProtocolDecl> &Protocols
- = IFace->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ IFace->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -6777,14 +7068,14 @@ static void FindImplementableMethods(ASTContext &Context,
// Visit the superclass.
if (IFace->getSuperClass())
FindImplementableMethods(Context, IFace->getSuperClass(),
- WantInstanceMethods, ReturnType,
- KnownMethods, false);
+ WantInstanceMethods, ReturnType, KnownMethods,
+ false);
}
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
// Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Category->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Category->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end();
I != E; ++I)
@@ -6806,10 +7097,10 @@ static void FindImplementableMethods(ASTContext &Context,
Container = Protocol;
// Recurse into protocols.
- const ObjCList<ObjCProtocolDecl> &Protocols
- = Protocol->getReferencedProtocols();
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ Protocol->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
- E = Protocols.end();
+ E = Protocols.end();
I != E; ++I)
FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
KnownMethods, false);
@@ -6832,8 +7123,7 @@ static void FindImplementableMethods(ASTContext &Context,
/// Add the parenthesized return or parameter type chunk to a code
/// completion string.
-static void AddObjCPassingTypeChunk(QualType Type,
- unsigned ObjCDeclQuals,
+static void AddObjCPassingTypeChunk(QualType Type, unsigned ObjCDeclQuals,
ASTContext &Context,
const PrintingPolicy &Policy,
CodeCompletionBuilder &Builder) {
@@ -6841,15 +7131,14 @@ static void AddObjCPassingTypeChunk(QualType Type,
std::string Quals = formatObjCParamQualifiers(ObjCDeclQuals, Type);
if (!Quals.empty())
Builder.AddTextChunk(Builder.getAllocator().CopyString(Quals));
- Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
- Builder.getAllocator()));
+ Builder.AddTextChunk(
+ GetCompletionTypeString(Type, Context, Policy, Builder.getAllocator()));
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
/// Determine whether the given class is or inherits from a class by
/// the given name.
-static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
- StringRef Name) {
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class, StringRef Name) {
if (!Class)
return false;
@@ -6863,8 +7152,7 @@ static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
/// Key-Value Observing (KVO).
static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
bool IsInstanceMethod,
- QualType ReturnType,
- ASTContext &Context,
+ QualType ReturnType, ASTContext &Context,
VisitedSelectorSet &KnownSelectors,
ResultBuilder &Results) {
IdentifierInfo *PropName = Property->getIdentifier();
@@ -6889,7 +7177,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
const char *CopiedKey;
KeyHolder(CodeCompletionAllocator &Allocator, StringRef Key)
- : Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
+ : Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
operator const char *() {
if (CopiedKey)
@@ -6904,19 +7192,19 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
- bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
- Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
- Property->getType());
- bool ReturnTypeMatchesVoid
- = ReturnType.isNull() || ReturnType->isVoidType();
+ bool ReturnTypeMatchesProperty =
+ ReturnType.isNull() ||
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Property->getType());
+ bool ReturnTypeMatchesVoid = ReturnType.isNull() || ReturnType->isVoidType();
// Add the normal accessor -(type)key.
if (IsInstanceMethod &&
KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
if (ReturnType.isNull())
- AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
- Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
+ Builder);
Builder.AddTypedTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
@@ -6928,9 +7216,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod &&
((!ReturnType.isNull() &&
(ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
- (ReturnType.isNull() &&
- (Property->getType()->isIntegerType() ||
- Property->getType()->isBooleanType())))) {
+ (ReturnType.isNull() && (Property->getType()->isIntegerType() ||
+ Property->getType()->isBooleanType())))) {
std::string SelectorName = (Twine("is") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -6941,8 +7228,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
@@ -6960,11 +7246,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
Builder.AddTypedTextChunk(":");
- AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
- Context, Policy, Builder);
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0, Context, Policy,
+ Builder);
Builder.AddTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
@@ -6976,8 +7261,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
unsigned IndexedSetterPriority = CCP_CodePattern;
unsigned UnorderedGetterPriority = CCP_CodePattern;
unsigned UnorderedSetterPriority = CCP_CodePattern;
- if (const ObjCObjectPointerType *ObjCPointer
- = Property->getType()->getAs<ObjCObjectPointerType>()) {
+ if (const auto *ObjCPointer =
+ Property->getType()->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
// If this interface type is not provably derived from a known
// collection, penalize the corresponding completions.
@@ -7013,12 +7298,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
- Builder.AddTypedTextChunk(
- Allocator.CopyString(SelectorId->getName()));
- Results.AddResult(Result(Builder.TakeString(),
- std::min(IndexedGetterPriority,
- UnorderedGetterPriority),
- CXCursor_ObjCInstanceMethodDecl));
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(
+ Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority, UnorderedGetterPriority),
+ CXCursor_ObjCInstanceMethodDecl));
}
}
@@ -7026,8 +7310,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add -(id)objectInKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
- std::string SelectorName
- = (Twine("objectIn") + UpperKey + "AtIndex").str();
+ std::string SelectorName = (Twine("objectIn") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7051,10 +7334,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSArray"))) {
- std::string SelectorName
- = (Twine(Property->getName()) + "AtIndexes").str();
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSArray"))) {
+ std::string SelectorName = (Twine(Property->getName()) + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7076,10 +7359,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Add -(void)getKey:(type **)buffer range:(NSRange)inRange
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("get") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("range")
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7110,10 +7391,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get("insertObject"),
- &Context.Idents.get(SelectorName)
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7142,10 +7421,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("insert") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("atIndexes")
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7172,8 +7449,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
+ std::string SelectorName =
+ (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7194,8 +7471,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("remove") + UpperKey + "AtIndexes").str();
+ std::string SelectorName = (Twine("remove") + UpperKey + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7216,12 +7492,10 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName),
- &Context.Idents.get("withObject")
- };
+ std::string SelectorName =
+ (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7248,13 +7522,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName1
- = (Twine("replace") + UpperKey + "AtIndexes").str();
+ std::string SelectorName1 =
+ (Twine("replace") + UpperKey + "AtIndexes").str();
std::string SelectorName2 = (Twine("with") + UpperKey).str();
- IdentifierInfo *SelectorIds[2] = {
- &Context.Idents.get(SelectorName1),
- &Context.Idents.get(SelectorName2)
- };
+ IdentifierInfo *SelectorIds[2] = {&Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
@@ -7285,8 +7557,9 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSEnumerator"))) {
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
@@ -7299,7 +7572,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
- CXCursor_ObjCInstanceMethodDecl));
+ CXCursor_ObjCInstanceMethodDecl));
}
}
@@ -7322,9 +7595,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
} else {
- Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
- Policy,
- Builder.getAllocator()));
+ Builder.AddTextChunk(GetCompletionTypeString(
+ ReturnType, Context, Policy, Builder.getAllocator()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
@@ -7336,8 +7608,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// Mutable unordered accessors
// - (void)addKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("add") + UpperKey + Twine("Object")).str();
+ std::string SelectorName =
+ (Twine("add") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7380,8 +7652,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
// - (void)removeKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
- std::string SelectorName
- = (Twine("remove") + UpperKey + Twine("Object")).str();
+ std::string SelectorName =
+ (Twine("remove") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
@@ -7449,10 +7721,11 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
- ->getName() == "NSSet"))) {
- std::string SelectorName
- = (Twine("keyPathsForValuesAffecting") + UpperKey).str();
+ ReturnType->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl()
+ ->getName() == "NSSet"))) {
+ std::string SelectorName =
+ (Twine("keyPathsForValuesAffecting") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
@@ -7464,17 +7737,16 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
- CXCursor_ObjCClassMethodDecl));
+ CXCursor_ObjCClassMethodDecl));
}
}
// + (BOOL)automaticallyNotifiesObserversOfKey
if (!IsInstanceMethod &&
- (ReturnType.isNull() ||
- ReturnType->isIntegerType() ||
+ (ReturnType.isNull() || ReturnType->isIntegerType() ||
ReturnType->isBooleanType())) {
- std::string SelectorName
- = (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
+ std::string SelectorName =
+ (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
@@ -7486,7 +7758,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
- CXCursor_ObjCClassMethodDecl));
+ CXCursor_ObjCClassMethodDecl));
}
}
}
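// A minimal standalone sketch of the llvm::Twine pattern used for the KVC/KVO
// selector names above: concatenation builds a lazy expression tree and .str()
// materializes the final std::string exactly once. The function name is an
// illustrative placeholder; only LLVM's ADT headers are assumed.
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <string>

static std::string affectingKeyPathsSelector(llvm::StringRef UpperKey) {
  return (llvm::Twine("keyPathsForValuesAffecting") + UpperKey).str();
}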
@@ -7498,8 +7770,8 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
QualType ReturnType = GetTypeFromParser(ReturnTy);
Decl *IDecl = nullptr;
if (CurContext->isObjCContainer()) {
- ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
- IDecl = OCD;
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ IDecl = OCD;
}
// Determine where we should start searching for methods.
ObjCContainerDecl *SearchDecl = nullptr;
@@ -7508,8 +7780,8 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
SearchDecl = Impl->getClassInterface();
IsInImplementation = true;
- } else if (ObjCCategoryImplDecl *CatImpl
- = dyn_cast<ObjCCategoryImplDecl>(D)) {
+ } else if (ObjCCategoryImplDecl *CatImpl =
+ dyn_cast<ObjCCategoryImplDecl>(D)) {
SearchDecl = CatImpl->getCategoryDecl();
IsInImplementation = true;
} else
@@ -7523,15 +7795,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (!SearchDecl) {
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
- nullptr, 0);
+ CodeCompletionContext::CCC_Other, nullptr, 0);
return;
}
// Find all of the methods that we could declare/implement here.
KnownMethodsMap KnownMethods;
- FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
- ReturnType, KnownMethods);
+ FindImplementableMethods(Context, SearchDecl, IsInstanceMethod, ReturnType,
+ KnownMethods);
// Add declarations or definitions for each of the known methods.
typedef CodeCompletionResult Result;
@@ -7541,7 +7812,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Results.EnterNewScope();
PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
- MEnd = KnownMethods.end();
+ MEnd = KnownMethods.end();
M != MEnd; ++M) {
ObjCMethodDecl *Method = M->second.getPointer();
CodeCompletionBuilder Builder(Results.getAllocator(),
@@ -7558,21 +7829,20 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
if (ReturnType.isNull()) {
QualType ResTy = Method->getSendResultType().stripObjCKindOfType(Context);
AttributedType::stripOuterNullability(ResTy);
- AddObjCPassingTypeChunk(ResTy,
- Method->getObjCDeclQualifier(), Context, Policy,
- Builder);
+ AddObjCPassingTypeChunk(ResTy, Method->getObjCDeclQualifier(), Context,
+ Policy, Builder);
}
Selector Sel = Method->getSelector();
// Add the first part of the selector to the pattern.
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Sel.getNameForSlot(0)));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(0)));
// Add parameters to the pattern.
unsigned I = 0;
for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
- PEnd = Method->param_end();
+ PEnd = Method->param_end();
P != PEnd; (void)++P, ++I) {
// Add the part of the selector name.
if (I == 0)
@@ -7580,7 +7850,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
else if (I < Sel.getNumArgs()) {
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
} else
break;
@@ -7590,16 +7860,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParamType = (*P)->getType();
else
ParamType = (*P)->getOriginalType();
- ParamType = ParamType.substObjCTypeArgs(Context, {},
- ObjCSubstitutionContext::Parameter);
+ ParamType = ParamType.substObjCTypeArgs(
+ Context, {}, ObjCSubstitutionContext::Parameter);
AttributedType::stripOuterNullability(ParamType);
- AddObjCPassingTypeChunk(ParamType,
- (*P)->getObjCDeclQualifier(),
- Context, Policy,
- Builder);
+ AddObjCPassingTypeChunk(ParamType, (*P)->getObjCDeclQualifier(), Context,
+ Policy, Builder);
if (IdentifierInfo *Id = (*P)->getIdentifier())
- Builder.AddTextChunk(Builder.getAllocator().CopyString( Id->getName()));
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
}
if (Method->isVariadic()) {
@@ -7627,25 +7895,24 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
}
unsigned Priority = CCP_CodePattern;
+ auto R = Result(Builder.TakeString(), Method, Priority);
if (!M->second.getInt())
- Priority += CCD_InBaseClass;
-
- Results.AddResult(Result(Builder.TakeString(), Method, Priority));
+ setInBaseClass(R);
+ Results.AddResult(std::move(R));
}
// Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
// the properties in this class and its categories.
- if (Context.getLangOpts().ObjC2) {
+ if (Context.getLangOpts().ObjC) {
SmallVector<ObjCContainerDecl *, 4> Containers;
Containers.push_back(SearchDecl);
VisitedSelectorSet KnownSelectors;
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
- MEnd = KnownMethods.end();
+ MEnd = KnownMethods.end();
M != MEnd; ++M)
KnownSelectors.insert(M->first);
-
ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
if (!IFace)
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
@@ -7669,16 +7936,14 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
Results.data(), Results.size());
}
-void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
- bool IsInstanceMethod,
- bool AtParameterName,
- ParsedType ReturnTy,
- ArrayRef<IdentifierInfo *> SelIdents) {
+void Sema::CodeCompleteObjCMethodDeclSelector(
+ Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnTy,
+ ArrayRef<IdentifierInfo *> SelIdents) {
// If we have an external source, load the entire class method
// pool from the AST file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
- I != N; ++I) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); I != N;
+ ++I) {
Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || MethodPool.count(Sel))
continue;
@@ -7700,10 +7965,9 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
for (GlobalMethodPool::iterator M = MethodPool.begin(),
MEnd = MethodPool.end();
M != MEnd; ++M) {
- for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
- &M->second.second;
- MethList && MethList->getMethod();
- MethList = MethList->getNext()) {
+ for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first
+ : &M->second.second;
+ MethList && MethList->getMethod(); MethList = MethList->getNext()) {
if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
continue;
@@ -7718,7 +7982,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- Param->getIdentifier()->getName()));
+ Param->getIdentifier()->getName()));
Results.AddResult(Builder.TakeString());
}
}
@@ -7864,7 +8128,7 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("arguments");
Results.AddResult(Builder.TakeString());
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
// #import "header"
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
@@ -7916,29 +8180,27 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
}
void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
- CodeCompleteOrdinaryName(S,
- S->getFnParent()? Sema::PCC_RecoveryInFunction
- : Sema::PCC_Namespace);
+ CodeCompleteOrdinaryName(S, S->getFnParent() ? Sema::PCC_RecoveryInFunction
+ : Sema::PCC_Namespace);
}
void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
- IsDefinition? CodeCompletionContext::CCC_MacroName
- : CodeCompletionContext::CCC_MacroNameUse);
+ IsDefinition ? CodeCompletionContext::CCC_MacroName
+ : CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
// Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
- MEnd = PP.macro_end();
+ MEnd = PP.macro_end();
M != MEnd; ++M) {
- Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
- M->first->getName()));
- Results.AddResult(CodeCompletionResult(Builder.TakeString(),
- CCP_CodePattern,
- CXCursor_MacroDefinition));
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(M->first->getName()));
+ Results.AddResult(CodeCompletionResult(
+ Builder.TakeString(), CCP_CodePattern, CXCursor_MacroDefinition));
}
Results.ExitScope();
} else if (IsDefinition) {
@@ -7955,9 +8217,11 @@ void Sema::CodeCompletePreprocessorExpression() {
CodeCompletionContext::CCC_PreprocessorExpression);
if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Results, true);
+ AddMacroResults(PP, Results,
+ CodeCompleter ? CodeCompleter->loadExternal() : false,
+ true);
- // defined (<macro>)
+ // defined (<macro>)
Results.EnterNewScope();
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -7984,10 +8248,118 @@ void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
// for the expanded tokens.
}
+// This handles completion inside an #include filename, e.g. #include <foo/ba
+// We look for the directory "foo" under each directory on the include path,
+// list its files, and reassemble the appropriate #include.
+void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
+ // RelDir should use /, but unescaped \ is possible on windows!
+ // Our completions will normalize to / for simplicity, this case is rare.
+ std::string RelDir = llvm::sys::path::convert_to_slash(Dir);
+ // We need the native slashes for the actual file system interactions.
+ SmallString<128> NativeRelDir = StringRef(RelDir);
+ llvm::sys::path::native(NativeRelDir);
+ auto FS = getSourceManager().getFileManager().getVirtualFileSystem();
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_IncludedFile);
+ llvm::DenseSet<StringRef> SeenResults; // To deduplicate results.
+
+ // Helper: adds one file or directory completion result.
+ auto AddCompletion = [&](StringRef Filename, bool IsDirectory) {
+ SmallString<64> TypedChunk = Filename;
+ // Directory completion is up to the slash, e.g. <sys/
+ TypedChunk.push_back(IsDirectory ? '/' : Angled ? '>' : '"');
+ auto R = SeenResults.insert(TypedChunk);
+ if (R.second) { // New completion
+ const char *InternedTyped = Results.getAllocator().CopyString(TypedChunk);
+ *R.first = InternedTyped; // Avoid dangling StringRef.
+ CodeCompletionBuilder Builder(CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(InternedTyped);
+ // The result is a "Pattern", which is pretty opaque.
+ // We may want to include the real filename to allow smart ranking.
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ }
+ };
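// Illustrative sketch (hypothetical names, not from this patch): completing
// the angled include "foo/ba" could yield results like "bar/" (a
// subdirectory, left open for further completion) and "base.h>" (a header,
// closed so the directive is well-formed once the result is accepted).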
+
+ // Helper: scans IncludeDir for nice files, and adds results for each.
+ auto AddFilesFromIncludeDir = [&](StringRef IncludeDir, bool IsSystem) {
+ llvm::SmallString<128> Dir = IncludeDir;
+ if (!NativeRelDir.empty())
+ llvm::sys::path::append(Dir, NativeRelDir);
+
+ std::error_code EC;
+ unsigned Count = 0;
+ for (auto It = FS->dir_begin(Dir, EC);
+ !EC && It != llvm::vfs::directory_iterator(); It.increment(EC)) {
+ if (++Count == 2500) // If we happen to hit a huge directory,
+ break; // bail out early so we're not too slow.
+ StringRef Filename = llvm::sys::path::filename(It->path());
+ switch (It->type()) {
+ case llvm::sys::fs::file_type::directory_file:
+ AddCompletion(Filename, /*IsDirectory=*/true);
+ break;
+ case llvm::sys::fs::file_type::regular_file:
+ // Only files that really look like headers. (Except in system dirs).
+ if (!IsSystem) {
+ // Header extensions from Types.def, which we can't depend on here.
+ if (!(Filename.endswith_lower(".h") ||
+ Filename.endswith_lower(".hh") ||
+ Filename.endswith_lower(".hpp") ||
+ Filename.endswith_lower(".inc")))
+ break;
+ }
+ AddCompletion(Filename, /*IsDirectory=*/false);
+ break;
+ default:
+ break;
+ }
+ }
+ };
+
+ // Helper: adds results relative to IncludeDir, if possible.
+ auto AddFilesFromDirLookup = [&](const DirectoryLookup &IncludeDir,
+ bool IsSystem) {
+ switch (IncludeDir.getLookupType()) {
+ case DirectoryLookup::LT_HeaderMap:
+ // header maps are not (currently) enumerable.
+ break;
+ case DirectoryLookup::LT_NormalDir:
+ AddFilesFromIncludeDir(IncludeDir.getDir()->getName(), IsSystem);
+ break;
+ case DirectoryLookup::LT_Framework:
+ AddFilesFromIncludeDir(IncludeDir.getFrameworkDir()->getName(), IsSystem);
+ break;
+ }
+ };
+
+ // Finally with all our helpers, we can scan the include path.
+ // Do this in standard order so deduplication keeps the right file.
+ // (In case we decide to add more details to the results later).
+ const auto &S = PP.getHeaderSearchInfo();
+ using llvm::make_range;
+ if (!Angled) {
+ // The current directory is on the include path for "quoted" includes.
+ auto *CurFile = PP.getCurrentFileLexer()->getFileEntry();
+ if (CurFile && CurFile->getDir())
+ AddFilesFromIncludeDir(CurFile->getDir()->getName(), false);
+ for (const auto &D : make_range(S.quoted_dir_begin(), S.quoted_dir_end()))
+ AddFilesFromDirLookup(D, false);
+ }
+ for (const auto &D : make_range(S.angled_dir_begin(), S.angled_dir_end()))
+ AddFilesFromDirLookup(D, false);
+ for (const auto &D : make_range(S.system_dir_begin(), S.system_dir_end()))
+ AddFilesFromDirLookup(D, true);
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
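// Illustrative walk-through of CodeCompleteIncludedFile (hypothetical paths,
// not from this patch): with /usr/include on the search path, typing
//
//   #include <sys/eve
//
// calls CodeCompleteIncludedFile("sys", /*Angled=*/true), which lists
// /usr/include/sys and offers e.g. "eventfd.h>" and "event/" as completions,
// deduplicated across every include directory containing a "sys" subdirectory.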
+
void Sema::CodeCompleteNaturalLanguage() {
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_NaturalLanguage,
- nullptr, 0);
+ CodeCompletionContext::CCC_NaturalLanguage, nullptr,
+ 0);
}
void Sema::CodeCompleteAvailabilityPlatformName() {
@@ -8006,9 +8378,9 @@ void Sema::CodeCompleteAvailabilityPlatformName() {
Results.data(), Results.size());
}
-void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- SmallVectorImpl<CodeCompletionResult> &Results) {
+void Sema::GatherGlobalCodeCompletions(
+ CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
+ SmallVectorImpl<CodeCompletionResult> &Results) {
ResultBuilder Builder(*this, Allocator, CCTUInfo,
CodeCompletionContext::CCC_Recovery);
if (!CodeCompleter || CodeCompleter->includeGlobals()) {
@@ -8020,9 +8392,11 @@ void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
}
if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Builder, true);
+ AddMacroResults(PP, Builder,
+ CodeCompleter ? CodeCompleter->loadExternal() : false,
+ true);
Results.clear();
- Results.insert(Results.end(),
- Builder.data(), Builder.data() + Builder.size());
+ Results.insert(Results.end(), Builder.data(),
+ Builder.data() + Builder.size());
}
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index 1d5454ca778b..181efa6d3dd0 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -84,8 +84,7 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
// ref-qualifier or with the & ref-qualifier
// -- "rvalue reference to cv X" for functions declared with the &&
// ref-qualifier
- QualType T =
- MD->getThisType(S.Context)->getAs<PointerType>()->getPointeeType();
+ QualType T = MD->getThisType()->getAs<PointerType>()->getPointeeType();
T = FnType->getRefQualifier() == RQ_RValue
? S.Context.getRValueReferenceType(T)
: S.Context.getLValueReferenceType(T, /*SpelledAsLValue*/ true);
@@ -453,7 +452,7 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// to bool.
ExprResult Conv = S.PerformContextuallyConvertToBool(AwaitReady);
if (Conv.isInvalid()) {
- S.Diag(AwaitReady->getDirectCallee()->getLocStart(),
+ S.Diag(AwaitReady->getDirectCallee()->getBeginLoc(),
diag::note_await_ready_no_bool_conversion);
S.Diag(Loc, diag::note_coroutine_promise_call_implicitly_required)
<< AwaitReady->getDirectCallee() << E->getSourceRange();
@@ -506,7 +505,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
auto *FD = cast<FunctionDecl>(CurContext);
bool IsThisDependentType = [&] {
if (auto *MD = dyn_cast_or_null<CXXMethodDecl>(FD))
- return MD->isInstance() && MD->getThisType(Context)->isDependentType();
+ return MD->isInstance() && MD->getThisType()->isDependentType();
else
return false;
}();
@@ -565,8 +564,8 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
// Create an initialization sequence for the promise type using the
// constructor arguments, wrapped in a parenthesized list expression.
- Expr *PLE = new (Context) ParenListExpr(Context, FD->getLocation(),
- CtorArgExprs, FD->getLocation());
+ Expr *PLE = ParenListExpr::Create(Context, FD->getLocation(),
+ CtorArgExprs, FD->getLocation());
InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
InitializationKind Kind = InitializationKind::CreateForInit(
VD->getLocation(), /*DirectInit=*/true, PLE);
@@ -647,7 +646,7 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
return StmtError();
Suspend = BuildResolvedCoawaitExpr(Loc, Suspend.get(),
/*IsImplicit*/ true);
- Suspend = ActOnFinishFullExpr(Suspend.get());
+ Suspend = ActOnFinishFullExpr(Suspend.get(), /*DiscardedValue*/ false);
if (Suspend.isInvalid()) {
Diag(Loc, diag::note_coroutine_promise_suspend_implicitly_required)
<< ((Name == "initial_suspend") ? 0 : 1);
@@ -841,6 +840,19 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
E = R.get();
}
+ // Move the return value if we can
+ if (E) {
+ auto NRVOCandidate = this->getCopyElisionCandidate(E->getType(), E, CES_AsIfByStdMove);
+ if (NRVOCandidate) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeResult(Loc, E->getType(), NRVOCandidate);
+ ExprResult MoveResult = this->PerformMoveOrCopyInitialization(
+ Entity, NRVOCandidate, E->getType(), E);
+ if (MoveResult.get())
+ E = MoveResult.get();
+ }
+ }
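// Illustrative effect of the block above (sketch; 'task' is a hypothetical
// coroutine return type, not from this patch):
//
//   task<std::string> f() {
//     std::string s = build();
//     co_return s;  // 's' is treated as an rvalue here, so a
//   }               // promise_type::return_value(std::string&&) overload
//                   // can be selected instead of forcing a copy.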
+
// FIXME: If the operand is a reference to a variable that's about to go out
// of scope, we should treat the operand as an xvalue for this overload
// resolution.
@@ -855,7 +867,7 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
if (PC.isInvalid())
return StmtError();
- Expr *PCE = ActOnFinishFullExpr(PC.get()).get();
+ Expr *PCE = ActOnFinishFullExpr(PC.get(), /*DiscardedValue*/ false).get();
Stmt *Res = new (Context) CoreturnStmt(Loc, E, PCE, IsImplicit);
return Res;
@@ -1224,7 +1236,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
ExprResult NewExpr =
S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
- NewExpr = S.ActOnFinishFullExpr(NewExpr.get());
+ NewExpr = S.ActOnFinishFullExpr(NewExpr.get(), /*DiscardedValue*/ false);
if (NewExpr.isInvalid())
return false;
@@ -1250,7 +1262,8 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
ExprResult DeleteExpr =
S.ActOnCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
- DeleteExpr = S.ActOnFinishFullExpr(DeleteExpr.get());
+ DeleteExpr =
+ S.ActOnFinishFullExpr(DeleteExpr.get(), /*DiscardedValue*/ false);
if (DeleteExpr.isInvalid())
return false;
@@ -1335,7 +1348,8 @@ bool CoroutineStmtBuilder::makeOnException() {
ExprResult UnhandledException = buildPromiseCall(S, Fn.CoroutinePromise, Loc,
"unhandled_exception", None);
- UnhandledException = S.ActOnFinishFullExpr(UnhandledException.get(), Loc);
+ UnhandledException = S.ActOnFinishFullExpr(UnhandledException.get(), Loc,
+ /*DiscardedValue*/ false);
if (UnhandledException.isInvalid())
return false;
@@ -1388,7 +1402,8 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
"get_return_object type must no longer be dependent");
if (FnRetType->isVoidType()) {
- ExprResult Res = S.ActOnFinishFullExpr(this->ReturnValue, Loc);
+ ExprResult Res =
+ S.ActOnFinishFullExpr(this->ReturnValue, Loc, /*DiscardedValue*/ false);
if (Res.isInvalid())
return false;
@@ -1420,7 +1435,7 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
if (Res.isInvalid())
return false;
- Res = S.ActOnFinishFullExpr(Res.get());
+ Res = S.ActOnFinishFullExpr(Res.get(), /*DiscardedValue*/ false);
if (Res.isInvalid())
return false;
@@ -1460,7 +1475,7 @@ static Expr *castForMoving(Sema &S, Expr *E, QualType T = QualType()) {
T = E->getType();
QualType TargetType = S.BuildReferenceType(
T, /*SpelledAsLValue*/ false, SourceLocation(), DeclarationName());
- SourceLocation ExprLoc = E->getLocStart();
+ SourceLocation ExprLoc = E->getBeginLoc();
TypeSourceInfo *TargetLoc =
S.Context.getTrivialTypeSourceInfo(TargetType, ExprLoc);
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index b92d76ad4204..23c99d45a78d 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -1735,12 +1735,13 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
FixItHint &Hint) {
if (isa<LabelDecl>(D)) {
- SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(),
- tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true);
+ SourceLocation AfterColon = Lexer::findLocationAfterToken(
+ D->getEndLoc(), tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(),
+ true);
if (AfterColon.isInvalid())
return;
- Hint = FixItHint::CreateRemoval(CharSourceRange::
- getCharRange(D->getLocStart(), AfterColon));
+ Hint = FixItHint::CreateRemoval(
+ CharSourceRange::getCharRange(D->getBeginLoc(), AfterColon));
}
}
@@ -2110,7 +2111,7 @@ void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
// Allow multiple definitions for ObjC built-in typedefs.
// FIXME: Verify the underlying types are equivalent!
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
const IdentifierInfo *TypeID = New->getIdentifier();
switch (TypeID->getLength()) {
default: break;
@@ -2474,14 +2475,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
- NewAttr = S.mergeInternalLinkageAttr(
- D, InternalLinkageA->getRange(),
- &S.Context.Idents.get(InternalLinkageA->getSpelling()),
- AttrSpellingListIndex);
+ NewAttr = S.mergeInternalLinkageAttr(D, *InternalLinkageA);
else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
- NewAttr = S.mergeCommonAttr(D, CommonA->getRange(),
- &S.Context.Idents.get(CommonA->getSpelling()),
- AttrSpellingListIndex);
+ NewAttr = S.mergeCommonAttr(D, *CommonA);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
@@ -3249,20 +3245,15 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// Redeclarations or specializations of a function or function template
// with a declared return type that uses a placeholder type shall also
// use that placeholder, not a deduced type.
- QualType OldDeclaredReturnType =
- (Old->getTypeSourceInfo()
- ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>()
- : OldType)->getReturnType();
- QualType NewDeclaredReturnType =
- (New->getTypeSourceInfo()
- ? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
- : NewType)->getReturnType();
+ QualType OldDeclaredReturnType = Old->getDeclaredReturnType();
+ QualType NewDeclaredReturnType = New->getDeclaredReturnType();
if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
- !((NewQType->isDependentType() || OldQType->isDependentType()) &&
- New->isLocalExternDecl())) {
+ canFullyTypeCheckRedeclaration(New, Old, NewDeclaredReturnType,
+ OldDeclaredReturnType)) {
QualType ResQT;
if (NewDeclaredReturnType->isObjCObjectPointerType() &&
OldDeclaredReturnType->isObjCObjectPointerType())
+ // FIXME: This does the wrong thing for a deduced return type.
ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
if (ResQT.isNull()) {
if (New->isCXXClassMember() && New->isOutOfLine())
@@ -3427,13 +3418,11 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
if (OldQTypeForComparison == NewQType)
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
- if ((NewQType->isDependentType() || OldQType->isDependentType()) &&
- New->isLocalExternDecl()) {
- // It's OK if we couldn't merge types for a local function declaraton
- // if either the old or new type is dependent. We'll merge the types
- // when we instantiate the function.
+ // If the types are imprecise (due to dependent constructs in friends or
+ // local extern declarations), it's OK if they differ. We'll check again
+ // during instantiation.
+ if (!canFullyTypeCheckRedeclaration(New, Old, NewQType, OldQType))
return false;
- }
// Fall through for conflicting redeclarations and redefinitions.
}
@@ -4356,8 +4345,8 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
Record = UT->getDecl();
if (Record && getLangOpts().MicrosoftExt) {
- Diag(DS.getLocStart(), diag::ext_ms_anonymous_record)
- << Record->isUnion() << DS.getSourceRange();
+ Diag(DS.getBeginLoc(), diag::ext_ms_anonymous_record)
+ << Record->isUnion() << DS.getSourceRange();
return BuildMicrosoftCAnonymousStruct(S, DS, Record);
}
@@ -4380,8 +4369,8 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
if (!DS.isMissingDeclaratorOk()) {
// Customize diagnostic for a typedef missing a name.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
- Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
- << DS.getSourceRange();
+ Diag(DS.getBeginLoc(), diag::ext_typedef_without_a_name)
+ << DS.getSourceRange();
else
DeclaresAnything = false;
}
@@ -4405,7 +4394,7 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
if (!DeclaresAnything) {
// In C, we allow this as a (popular) extension / bug. Don't bother
// producing further diagnostics for redundant qualifiers after this.
- Diag(DS.getLocStart(), diag::ext_no_declarators) << DS.getSourceRange();
+ Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
return TagD;
}
@@ -4815,14 +4804,11 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Create a declaration for this anonymous struct/union.
NamedDecl *Anon = nullptr;
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
- Anon = FieldDecl::Create(Context, OwningClass,
- DS.getLocStart(),
- Record->getLocation(),
- /*IdentifierInfo=*/nullptr,
- Context.getTypeDeclType(Record),
- TInfo,
- /*BitWidth=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
+ Anon = FieldDecl::Create(
+ Context, OwningClass, DS.getBeginLoc(), Record->getLocation(),
+ /*IdentifierInfo=*/nullptr, Context.getTypeDeclType(Record), TInfo,
+ /*BitWidth=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
if (getLangOpts().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
@@ -4837,11 +4823,9 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
SC = SC_None;
}
- Anon = VarDecl::Create(Context, Owner,
- DS.getLocStart(),
+ Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
- Context.getTypeDeclType(Record),
- TInfo, SC);
+ Context.getTypeDeclType(Record), TInfo, SC);
// Default-initialize the implicit variable. This initialization will be
// trivial in almost all cases, except if a union member has an in-class
@@ -4913,15 +4897,11 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
QualType RecTy = Context.getTypeDeclType(Record);
// Create a declaration for this anonymous struct.
- NamedDecl *Anon = FieldDecl::Create(Context,
- ParentDecl,
- DS.getLocStart(),
- DS.getLocStart(),
- /*IdentifierInfo=*/nullptr,
- RecTy,
- TInfo,
- /*BitWidth=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
+ NamedDecl *Anon =
+ FieldDecl::Create(Context, ParentDecl, DS.getBeginLoc(), DS.getBeginLoc(),
+ /*IdentifierInfo=*/nullptr, RecTy, TInfo,
+ /*BitWidth=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
Anon->setImplicit();
// Add the anonymous struct object to the current context.
@@ -4962,7 +4942,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
case UnqualifiedIdKind::IK_ImplicitSelfParam:
case UnqualifiedIdKind::IK_Identifier:
NameInfo.setName(Name.Identifier);
- NameInfo.setLoc(Name.StartLocation);
return NameInfo;
case UnqualifiedIdKind::IK_DeductionGuideName: {
@@ -4989,14 +4968,12 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
NameInfo.setName(
Context.DeclarationNames.getCXXDeductionGuideName(Template));
- NameInfo.setLoc(Name.StartLocation);
return NameInfo;
}
case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
- NameInfo.setLoc(Name.StartLocation);
NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
= Name.OperatorFunctionId.SymbolLocations[0];
NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
@@ -5006,7 +4983,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
case UnqualifiedIdKind::IK_LiteralOperatorId:
NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
Name.Identifier));
- NameInfo.setLoc(Name.StartLocation);
NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
return NameInfo;
@@ -5017,7 +4993,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
Context.getCanonicalType(Ty)));
- NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
@@ -5029,7 +5004,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(Ty)));
- NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
@@ -5051,7 +5025,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(CurClassType)));
- NameInfo.setLoc(Name.StartLocation);
// FIXME: should we retrieve TypeSourceInfo?
NameInfo.setNamedTypeInfo(nullptr);
return NameInfo;
@@ -5064,7 +5037,6 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(Ty)));
- NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
@@ -5349,9 +5321,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
return ActOnDecompositionDeclarator(S, D, TemplateParamLists);
} else if (!Name) {
if (!D.isInvalidType()) // Reject this if we think it is valid.
- Diag(D.getDeclSpec().getLocStart(),
- diag::err_declarator_need_ident)
- << D.getDeclSpec().getSourceRange() << D.getSourceRange();
+ Diag(D.getDeclSpec().getBeginLoc(), diag::err_declarator_need_ident)
+ << D.getDeclSpec().getSourceRange() << D.getSourceRange();
return nullptr;
} else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
return nullptr;
@@ -5542,15 +5513,8 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
// If this has an identifier and is not a function template specialization,
// add it to the scope stack.
- if (New->getDeclName() && AddToScope) {
- // Only make a locally-scoped extern declaration visible if it is the first
- // declaration of this entity. Qualified lookup for such an entity should
- // only find this declaration if there is no visible declaration of it.
- bool AddToContext = !D.isRedeclaration() || !New->isLocalExternDecl();
- PushOnScopeChains(New, S, AddToContext);
- if (!AddToContext)
- CurContext->addHiddenDecl(New);
- }
+ if (New->getDeclName() && AddToScope)
+ PushOnScopeChains(New, S);
if (isInOpenMPDeclareTargetContext())
checkDeclIsAllowedInOpenMPTarget(nullptr, New);
@@ -5604,11 +5568,13 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
if (VLATy->getElementType()->isVariablyModifiedType())
return QualType();
- llvm::APSInt Res;
+ Expr::EvalResult Result;
if (!VLATy->getSizeExpr() ||
- !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
+ !VLATy->getSizeExpr()->EvaluateAsInt(Result, Context))
return QualType();
+ llvm::APSInt Res = Result.Val.getInt();
+
// Check whether the array size is negative.
if (Res.isSigned() && Res.isNegative()) {
SizeIsNegative = true;
@@ -5897,10 +5863,10 @@ isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
return true;
}
-static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
+static void SetNestedNameSpecifier(Sema &S, DeclaratorDecl *DD, Declarator &D) {
CXXScopeSpec &SS = D.getCXXScopeSpec();
if (!SS.isSet()) return;
- DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
+ DD->setQualifierInfo(SS.getWithLocInContext(S.Context));
}
bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
@@ -6021,14 +5987,14 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
// The [[lifetimebound]] attribute can be applied to the implicit object
// parameter of a non-static member function (other than a ctor or dtor)
// by applying it to the function type.
- if (ATL.getAttrKind() == AttributedType::attr_lifetimebound) {
+ if (const auto *A = ATL.getAttrAs<LifetimeBoundAttr>()) {
const auto *MD = dyn_cast<CXXMethodDecl>(FD);
if (!MD || MD->isStatic()) {
- S.Diag(ATL.getAttrNameLoc(), diag::err_lifetimebound_no_object_param)
- << !MD << ATL.getLocalSourceRange();
+ S.Diag(A->getLocation(), diag::err_lifetimebound_no_object_param)
+ << !MD << A->getRange();
} else if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) {
- S.Diag(ATL.getAttrNameLoc(), diag::err_lifetimebound_ctor_dtor)
- << isa<CXXDestructorDecl>(MD) << ATL.getLocalSourceRange();
+ S.Diag(A->getLocation(), diag::err_lifetimebound_ctor_dtor)
+ << isa<CXXDestructorDecl>(MD) << A->getRange();
}
}
}
@@ -6383,7 +6349,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// address space qualifiers.
if (R->isEventT()) {
if (R.getAddressSpace() != LangAS::opencl_private) {
- Diag(D.getLocStart(), diag::err_event_t_addr_space_qual);
+ Diag(D.getBeginLoc(), diag::err_event_t_addr_space_qual);
D.setInvalidType();
}
}
@@ -6457,9 +6423,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
VarTemplateDecl *NewTemplate = nullptr;
TemplateParameterList *TemplateParams = nullptr;
if (!getLangOpts().CPlusPlus) {
- NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
- D.getIdentifierLoc(), II,
- R, TInfo, SC);
+ NewVD = VarDecl::Create(Context, DC, D.getBeginLoc(), D.getIdentifierLoc(),
+ II, R, TInfo, SC);
if (R->getContainedDeducedType())
ParsingInitForAutoVars.insert(NewVD);
@@ -6521,7 +6486,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
TemplateParams = MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
+ D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
@@ -6580,11 +6545,11 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewVD = cast<VarDecl>(Res.get());
AddToScope = false;
} else if (D.isDecompositionDeclarator()) {
- NewVD = DecompositionDecl::Create(Context, DC, D.getLocStart(),
+ NewVD = DecompositionDecl::Create(Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), R, TInfo, SC,
Bindings);
} else
- NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
+ NewVD = VarDecl::Create(Context, DC, D.getBeginLoc(),
D.getIdentifierLoc(), II, R, TInfo, SC);
// If this is supposed to be a variable template, create it as such.
@@ -6606,7 +6571,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewTemplate->setInvalidDecl();
}
- SetNestedNameSpecifier(NewVD, D);
+ SetNestedNameSpecifier(*this, NewVD, D);
// If we have any template parameter lists that don't directly belong to
// the variable (matching the scope specifier), store them.
@@ -6801,7 +6766,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
if (!R->isIntegralType(Context) && !R->isPointerType()) {
- Diag(D.getLocStart(), diag::err_asm_bad_register_type);
+ Diag(D.getBeginLoc(), diag::err_asm_bad_register_type);
NewVD->setInvalidDecl(true);
}
}
@@ -6931,12 +6896,12 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// C++ [basic.start.main]p3
// A program that declares a variable main at global scope is ill-formed.
if (getLangOpts().CPlusPlus)
- Diag(D.getLocStart(), diag::err_main_global_variable);
+ Diag(D.getBeginLoc(), diag::err_main_global_variable);
// In C, an external-linkage variable named main results in undefined
// behavior.
else if (NewVD->hasExternalFormalLinkage())
- Diag(D.getLocStart(), diag::warn_main_redefined);
+ Diag(D.getBeginLoc(), diag::warn_main_redefined);
}
if (D.isRedeclaration() && !Previous.empty()) {
@@ -7382,19 +7347,23 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
}
- // OpenCL v1.2 s6.5 - All program scope variables must be declared in the
+ // OpenCL C v1.2 s6.5 - All program scope variables must be declared in the
// __constant address space.
- // OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
+ // OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
// variables inside a function can also be declared in the global
// address space.
+ // OpenCL C++ v1.0 s2.5 inherits rule from OpenCL C v2.0 and allows local
+ // address space additionally.
+ // FIXME: Add local AS for OpenCL C++.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
- getLangOpts().OpenCLVersion == 200))) {
+ (getLangOpts().OpenCLVersion == 200 ||
+ getLangOpts().OpenCLCPlusPlus)))) {
int Scope = NewVD->isStaticLocal() | NewVD->hasExternalStorage() << 1;
- if (getLangOpts().OpenCLVersion == 200)
+ if (getLangOpts().OpenCLVersion == 200 || getLangOpts().OpenCLCPlusPlus)
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "global or constant";
else
@@ -7747,8 +7716,10 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
SmallVector<std::pair<FunctionDecl *, unsigned>, 1> NearMatches;
TypoCorrection Correction;
bool IsDefinition = ExtraArgs.D.isFunctionDefinition();
- unsigned DiagMsg = IsLocalFriend ? diag::err_no_matching_local_friend
- : diag::err_member_decl_does_not_match;
+ unsigned DiagMsg =
+ IsLocalFriend ? diag::err_no_matching_local_friend :
+ NewFD->getFriendObjectKind() ? diag::err_qualified_friend_no_match :
+ diag::err_member_decl_does_not_match;
LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
IsLocalFriend ? Sema::LookupLocalFriendName
: Sema::LookupOrdinaryName,
@@ -7938,10 +7909,8 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
(D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
(!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
- NewFD = FunctionDecl::Create(SemaRef.Context, DC,
- D.getLocStart(), NameInfo, R,
- TInfo, SC, isInline,
- HasPrototype, false);
+ NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
+ R, TInfo, SC, isInline, HasPrototype, false);
if (D.isInvalidType())
NewFD->setInvalidDecl();
@@ -7966,31 +7935,26 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
"Constructors can only be declared in a member context");
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
- return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
- D.getLocStart(), NameInfo,
- R, TInfo, isExplicit, isInline,
- /*isImplicitlyDeclared=*/false,
- isConstexpr);
+ return CXXConstructorDecl::Create(
+ SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
+ TInfo, isExplicit, isInline,
+ /*isImplicitlyDeclared=*/false, isConstexpr);
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
if (DC->isRecord()) {
R = SemaRef.CheckDestructorDeclarator(D, R, SC);
CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
- CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
- SemaRef.Context, Record,
- D.getLocStart(),
- NameInfo, R, TInfo, isInline,
- /*isImplicitlyDeclared=*/false);
-
- // If the class is complete, then we now create the implicit exception
- // specification. If the class is incomplete or dependent, we can't do
- // it yet.
- if (SemaRef.getLangOpts().CPlusPlus11 && !Record->isDependentType() &&
- Record->getDefinition() && !Record->isBeingDefined() &&
- R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) {
- SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
- }
+ CXXDestructorDecl *NewDD =
+ CXXDestructorDecl::Create(SemaRef.Context, Record, D.getBeginLoc(),
+ NameInfo, R, TInfo, isInline,
+ /*isImplicitlyDeclared=*/false);
+
+ // If the destructor needs an implicit exception specification, set it
+ // now. FIXME: It'd be nice to be able to create the right type to start
+ // with, but the type needs to reference the destructor declaration.
+ if (SemaRef.getLangOpts().CPlusPlus11)
+ SemaRef.AdjustDestructorExceptionSpec(NewDD);
IsVirtualOkay = true;
return NewDD;
@@ -8001,10 +7965,9 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// Create a FunctionDecl to satisfy the function definition parsing
// code path.
- return FunctionDecl::Create(SemaRef.Context, DC,
- D.getLocStart(),
- D.getIdentifierLoc(), Name, R, TInfo,
- SC, isInline,
+ return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
+ D.getIdentifierLoc(), Name, R, TInfo, SC,
+ isInline,
/*hasPrototype=*/true, isConstexpr);
}
@@ -8017,17 +7980,16 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
SemaRef.CheckConversionDeclarator(D, R, SC);
IsVirtualOkay = true;
- return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
- D.getLocStart(), NameInfo,
- R, TInfo, isInline, isExplicit,
- isConstexpr, SourceLocation());
+ return CXXConversionDecl::Create(
+ SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
+ TInfo, isInline, isExplicit, isConstexpr, SourceLocation());
} else if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
- return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getLocStart(),
+ return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
isExplicit, NameInfo, R, TInfo,
- D.getLocEnd());
+ D.getEndLoc());
} else if (DC->isRecord()) {
// If the name of the function is the same as the name of the record,
// then this must be an invalid constructor that has a return type.
@@ -8042,11 +8004,9 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
}
// This is a C++ method declaration.
- CXXMethodDecl *Ret = CXXMethodDecl::Create(SemaRef.Context,
- cast<CXXRecordDecl>(DC),
- D.getLocStart(), NameInfo, R,
- TInfo, SC, isInline,
- isConstexpr, SourceLocation());
+ CXXMethodDecl *Ret = CXXMethodDecl::Create(
+ SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
+ TInfo, SC, isInline, isConstexpr, SourceLocation());
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
@@ -8058,10 +8018,9 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// Determine whether the function was written with a
// prototype. This is true when:
// - we're in C++ (where every function has a prototype),
- return FunctionDecl::Create(SemaRef.Context, DC,
- D.getLocStart(),
- NameInfo, R, TInfo, SC, isInline,
- true/*HasPrototype*/, isConstexpr);
+ return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
+ R, TInfo, SC, isInline, true /*HasPrototype*/,
+ isConstexpr);
}
}
@@ -8137,7 +8096,7 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
const Type *UnderlyingTy = PT->getPointeeOrArrayElementType();
// Call ourselves to check an underlying type of an array. Since the
// getPointeeOrArrayElementType returns an innermost type which is not an
- // array, this recusive call only happens once.
+ // array, this recursive call only happens once.
return getOpenCLKernelParameterType(S, QualType(UnderlyingTy, 0));
}
@@ -8415,7 +8374,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
}
- SetNestedNameSpecifier(NewFD, D);
+ SetNestedNameSpecifier(*this, NewFD, D);
isMemberSpecialization = false;
isFunctionTemplateSpecialization = false;
if (D.isInvalidType())
@@ -8426,7 +8385,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
bool Invalid = false;
if (TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
- D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
+ D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
@@ -8496,8 +8455,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< FixItHint::CreateInsertion(InsertLoc, "<>");
}
}
- }
- else {
+ } else {
// All template param lists were matched against the scope specifier:
// this is NOT (an explicit specialization of) a template.
if (TemplateParamLists.size() > 0)
@@ -9067,10 +9025,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// selecting a friend based on a dependent factor. But there
// are situations where these conditions don't apply and we
// can actually do this check immediately.
+ //
+ // Unless the scope is dependent, it's always an error if qualified
+ // redeclaration lookup found nothing at all. Diagnose that now;
+ // nothing will diagnose that error later.
if (isFriend &&
- (TemplateParamLists.size() ||
- D.getCXXScopeSpec().getScopeRep()->isDependent() ||
- CurContext->isDependentContext())) {
+ (D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ (!Previous.empty() && (TemplateParamLists.size() ||
+ CurContext->isDependentContext())))) {
// ignore these
} else {
// The user tried to provide an out-of-line definition for a
@@ -9336,6 +9298,39 @@ Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
}
return nullptr;
}
+
+/// Determines if we can perform a correct type check for \p D as a
+/// redeclaration of \p PrevDecl. If not, we can generally still perform a
+/// best-effort check.
+///
+/// \param NewD The new declaration.
+/// \param OldD The old declaration.
+/// \param NewT The portion of the type of the new declaration to check.
+/// \param OldT The portion of the type of the old declaration to check.
+bool Sema::canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
+ QualType NewT, QualType OldT) {
+ if (!NewD->getLexicalDeclContext()->isDependentContext())
+ return true;
+
+ // For dependently-typed local extern declarations and friends, we can't
+ // perform a correct type check in general until instantiation:
+ //
+ // int f();
+ // template<typename T> void g() { T f(); }
+ //
+ // (valid if g() is only instantiated with T = int).
+ if (NewT->isDependentType() &&
+ (NewD->isLocalExternDecl() || NewD->getFriendObjectKind()))
+ return false;
+
+ // Similarly, if the previous declaration was a dependent local extern
+ // declaration, we don't really know its type yet.
+ if (OldT->isDependentType() && OldD->isLocalExternDecl())
+ return false;
+
+ return true;
+}
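// Illustrative sketch (hypothetical code, not from this patch) of the friend
// case that the function above defers in the same way:
//
//   void h(int);
//   template<typename T> struct S {
//     friend void h(T);  // dependent type: only checkable at instantiation
//   };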
+
/// Checks if the new declaration declared in dependent context must be
/// put in the same redeclaration chain as the specified declaration.
///
@@ -9346,36 +9341,32 @@ Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
/// belongs to.
///
bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
- // Any declarations should be put into redeclaration chains except for
- // friend declaration in a dependent context that names a function in
- // namespace scope.
+ if (!D->getLexicalDeclContext()->isDependentContext())
+ return true;
+
+ // Don't chain dependent friend function definitions until instantiation, to
+ // permit cases like
//
- // This allows to compile code like:
+ // void func();
+ // template<typename T> class C1 { friend void func() {} };
+ // template<typename T> class C2 { friend void func() {} };
//
- // void func();
- // template<typename T> class C1 { friend void func() { } };
- // template<typename T> class C2 { friend void func() { } };
+ // ... which is valid if only one of C1 and C2 is ever instantiated.
//
- // This code snippet is a valid code unless both templates are instantiated.
- return !(D->getLexicalDeclContext()->isDependentContext() &&
- D->getDeclContext()->isFileContext() &&
- D->getFriendObjectKind() != Decl::FOK_None);
-}
+ // FIXME: This need only apply to function definitions. For now, we proxy
+ // this by checking for a file-scope function. We do not want this to apply
+ // to friend declarations nominating member functions, because that gets in
+ // the way of access checks.
+ if (D->getFriendObjectKind() && D->getDeclContext()->isFileContext())
+ return false;
-namespace MultiVersioning {
-enum Type { None, Target, CPUSpecific, CPUDispatch};
-} // MultiVersionType
-
-static MultiVersioning::Type
-getMultiVersionType(const FunctionDecl *FD) {
- if (FD->hasAttr<TargetAttr>())
- return MultiVersioning::Target;
- if (FD->hasAttr<CPUDispatchAttr>())
- return MultiVersioning::CPUDispatch;
- if (FD->hasAttr<CPUSpecificAttr>())
- return MultiVersioning::CPUSpecific;
- return MultiVersioning::None;
+ auto *VD = dyn_cast<ValueDecl>(D);
+ auto *PrevVD = dyn_cast<ValueDecl>(PrevDecl);
+ return !VD || !PrevVD ||
+ canFullyTypeCheckRedeclaration(VD, PrevVD, VD->getType(),
+ PrevVD->getType());
}
+
/// Check the target attribute of the function for MultiVersion
/// validity.
///
@@ -9412,10 +9403,31 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
return false;
}
+static bool HasNonMultiVersionAttributes(const FunctionDecl *FD,
+ MultiVersionKind MVType) {
+ for (const Attr *A : FD->attrs()) {
+ switch (A->getKind()) {
+ case attr::CPUDispatch:
+ case attr::CPUSpecific:
+ if (MVType != MultiVersionKind::CPUDispatch &&
+ MVType != MultiVersionKind::CPUSpecific)
+ return true;
+ break;
+ case attr::Target:
+ if (MVType != MultiVersionKind::Target)
+ return true;
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
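// Illustrative sketch (hypothetical declaration, not from this patch) of what
// the helper above flags: in
//
//   __attribute__((target("avx2"), noinline)) int f(int);
//
// 'noinline' is not a multiversioning attribute, so the helper returns true
// and the err_multiversion_no_other_attrs diagnostic below applies.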
+
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
- MultiVersioning::Type MVType) {
+ MultiVersionKind MVType) {
enum DoesntSupport {
FuncTemplates = 0,
VirtFuncs = 1,
@@ -9436,8 +9448,8 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
};
bool IsCPUSpecificCPUDispatchMVType =
- MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific;
+ MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific;
if (OldFD && !OldFD->getType()->getAs<FunctionProtoType>()) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_noproto);
@@ -9457,15 +9469,14 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
// For now, disallow all other attributes. These should be opt-in, but
// an analysis of all of them is a future FIXME.
- if (CausesMV && OldFD &&
- std::distance(OldFD->attr_begin(), OldFD->attr_end()) != 1) {
+ if (CausesMV && OldFD && HasNonMultiVersionAttributes(OldFD, MVType)) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
- if (std::distance(NewFD->attr_begin(), NewFD->attr_end()) != 1)
+ if (HasNonMultiVersionAttributes(NewFD, MVType))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
@@ -9498,8 +9509,8 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DefaultedFuncs;
- if (NewFD->isConstexpr() && (MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific))
+ if (NewFD->isConstexpr() && (MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << ConstexprFuncs;
@@ -9563,19 +9574,19 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
- MultiVersioning::Type MVType,
+ MultiVersionKind MVType,
const TargetAttr *TA,
const CPUDispatchAttr *CPUDisp,
const CPUSpecificAttr *CPUSpec) {
- assert(MVType != MultiVersioning::None &&
+ assert(MVType != MultiVersionKind::None &&
"Function lacks multiversion attribute");
// Target only causes MV if it is default; otherwise this is a normal
// function.
- if (MVType == MultiVersioning::Target && !TA->isDefaultVersion())
+ if (MVType == MultiVersionKind::Target && !TA->isDefaultVersion())
return false;
- if (MVType == MultiVersioning::Target && CheckMultiVersionValue(S, FD)) {
+ if (MVType == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
@@ -9589,6 +9600,15 @@ static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
return false;
}
+static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
+ for (const Decl *D = FD->getPreviousDecl(); D; D = D->getPreviousDecl()) {
+ if (D->getAsFunction()->getMultiVersionKind() != MultiVersionKind::None)
+ return true;
+ }
+
+ return false;
+}
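// Illustrative sketch (hypothetical declarations, not from this patch) of the
// redeclaration pattern this check is meant to permit and reject:
//
//   int foo(void);                                     // plain forward decl: OK
//   int foo(void) __attribute__((target("default")));  // multiversioning starts
//   int foo(void) __attribute__((target("avx2")));     // another version: OK
//   int foo(void);                                      // error: attribute
//                                                       // required in redecl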
+
static bool CheckTargetCausesMultiVersioning(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
@@ -9596,11 +9616,12 @@ static bool CheckTargetCausesMultiVersioning(
const auto *OldTA = OldFD->getAttr<TargetAttr>();
TargetAttr::ParsedTargetAttr NewParsed = NewTA->parse();
// Sort order doesn't matter, it just needs to be consistent.
- llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+ llvm::sort(NewParsed.Features);
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
- if (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())
+ if (!NewTA->isDefaultVersion() &&
+ (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr()))
return false;
// Otherwise, this decl causes MultiVersioning.
@@ -9612,7 +9633,7 @@ static bool CheckTargetCausesMultiVersioning(
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
- MultiVersioning::Target)) {
+ MultiVersionKind::Target)) {
NewFD->setInvalidDecl();
return true;
}
@@ -9622,6 +9643,15 @@ static bool CheckTargetCausesMultiVersioning(
return true;
}
+ // If this is 'default', permit the forward declaration.
+ if (!OldFD->isMultiVersion() && !OldTA && NewTA->isDefaultVersion()) {
+ Redeclaration = true;
+ OldDecl = OldFD;
+ OldFD->setIsMultiVersion();
+ NewFD->setIsMultiVersion();
+ return false;
+ }
+
if (CheckMultiVersionValue(S, OldFD)) {
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
@@ -9640,7 +9670,10 @@ static bool CheckTargetCausesMultiVersioning(
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
- if (!CurTA || CurTA->isInherited()) {
+ // We allow forward declarations before ANY multiversioning attributes, but
+ // nothing after the fact.
+ if (PreviousDeclsHaveMultiVersionAttribute(FD) &&
+ (!CurTA || CurTA->isInherited())) {
S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
<< 0;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
@@ -9662,17 +9695,17 @@ static bool CheckTargetCausesMultiVersioning(
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersioning::Type NewMVType, const TargetAttr *NewTA,
+ MultiVersionKind NewMVType, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
- MultiVersioning::Type OldMVType = getMultiVersionType(OldFD);
+ MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
- if ((OldMVType == MultiVersioning::Target &&
- NewMVType != MultiVersioning::Target) ||
- (NewMVType == MultiVersioning::Target &&
- OldMVType != MultiVersioning::Target)) {
+ if ((OldMVType == MultiVersionKind::Target &&
+ NewMVType != MultiVersionKind::Target) ||
+ (NewMVType == MultiVersionKind::Target &&
+ OldMVType != MultiVersionKind::Target)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@@ -9682,7 +9715,7 @@ static bool CheckMultiVersionAdditionalDecl(
TargetAttr::ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = NewTA->parse();
- llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
+ llvm::sort(NewParsed.Features);
}
bool UseMemberUsingDeclRules =
@@ -9697,7 +9730,7 @@ static bool CheckMultiVersionAdditionalDecl(
if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- if (NewMVType == MultiVersioning::Target) {
+ if (NewMVType == MultiVersionKind::Target) {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
@@ -9720,7 +9753,7 @@ static bool CheckMultiVersionAdditionalDecl(
// Handle CPUDispatch/CPUSpecific versions.
// Only 1 CPUDispatch function is allowed, this will make it go through
// the redeclaration errors.
- if (NewMVType == MultiVersioning::CPUDispatch &&
+ if (NewMVType == MultiVersionKind::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
@@ -9741,7 +9774,7 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setInvalidDecl();
return true;
}
- if (NewMVType == MultiVersioning::CPUSpecific && CurCPUSpec) {
+ if (NewMVType == MultiVersionKind::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
@@ -9777,17 +9810,27 @@ static bool CheckMultiVersionAdditionalDecl(
// Else, this is simply a non-redecl case. Checking the 'value' is only
// necessary in the Target case, since the CPUSpecific/Dispatch cases are
// handled in the attribute adding step.
- if (NewMVType == MultiVersioning::Target &&
+ if (NewMVType == MultiVersionKind::Target &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
- if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, false, NewMVType)) {
+ if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD,
+ !OldFD->isMultiVersion(), NewMVType)) {
NewFD->setInvalidDecl();
return true;
}
+ // Permit forward declarations in the case where these two are compatible.
+ if (!OldFD->isMultiVersion()) {
+ OldFD->setIsMultiVersion();
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = OldFD;
+ return false;
+ }
+
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
@@ -9819,14 +9862,14 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return true;
}
- MultiVersioning::Type MVType = getMultiVersionType(NewFD);
+ MultiVersionKind MVType = NewFD->getMultiVersionKind();
// Main isn't allowed to become a multiversion function; however, it IS
// permitted to have 'main' be marked with the 'target' optimization hint.
if (NewFD->isMain()) {
- if ((MVType == MultiVersioning::Target && NewTA->isDefaultVersion()) ||
- MVType == MultiVersioning::CPUDispatch ||
- MVType == MultiVersioning::CPUSpecific) {
+ if ((MVType == MultiVersionKind::Target && NewTA->isDefaultVersion()) ||
+ MVType == MultiVersionKind::CPUDispatch ||
+ MVType == MultiVersionKind::CPUSpecific) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
@@ -9839,7 +9882,7 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
- if (MVType == MultiVersioning::None)
+ if (MVType == MultiVersionKind::None)
return false;
return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA, NewCPUDisp,
NewCPUSpec);
@@ -9847,29 +9890,21 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
FunctionDecl *OldFD = OldDecl->getAsFunction();
- if (!OldFD->isMultiVersion() && MVType == MultiVersioning::None)
+ if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
return false;
- if (OldFD->isMultiVersion() && MVType == MultiVersioning::None) {
+ if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
- << (getMultiVersionType(OldFD) != MultiVersioning::Target);
+ << (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
NewFD->setInvalidDecl();
return true;
}
// Handle the target potentially causes multiversioning case.
- if (!OldFD->isMultiVersion() && MVType == MultiVersioning::Target)
+ if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous);
- // Previous declarations lack CPUDispatch/CPUSpecific.
- if (!OldFD->isMultiVersion()) {
- S.Diag(OldFD->getLocation(), diag::err_multiversion_required_in_redecl)
- << 1;
- S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
- NewFD->setInvalidDecl();
- return true;
- }
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
@@ -9982,7 +10017,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
- (MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
+ !MD->getTypeQualifiers().hasConst()) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
@@ -9990,7 +10025,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
const FunctionProtoType *FPT =
MD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals |= Qualifiers::Const;
+ EPI.TypeQuals.addConst();
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
@@ -10022,11 +10057,17 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (FunctionTemplateDecl *OldTemplateDecl =
dyn_cast<FunctionTemplateDecl>(OldDecl)) {
auto *OldFD = OldTemplateDecl->getTemplatedDecl();
- NewFD->setPreviousDeclaration(OldFD);
- adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
+
+ // The call to MergeFunctionDecl above may have created some state in
+ // NewTemplateDecl that needs to be merged with OldTemplateDecl before we
+ // can add it as a redeclaration.
+ NewTemplateDecl->mergePrevDecl(OldTemplateDecl);
+
+ NewFD->setPreviousDeclaration(OldFD);
+ adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember()) {
NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
@@ -10116,7 +10157,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// A deduction guide is not on the list of entities that can be
// explicitly specialized.
if (Guide->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
- Diag(Guide->getLocStart(), diag::err_deduction_guide_specialized)
+ Diag(Guide->getBeginLoc(), diag::err_deduction_guide_specialized)
<< /*explicit specialization*/ 1;
}
@@ -10479,7 +10520,7 @@ namespace {
Expr *Base = E;
bool ReferenceField = false;
- // Get the field memebers used.
+ // Get the field members used.
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (!FD)
@@ -10719,11 +10760,10 @@ namespace {
return;
}
- S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
+ S.DiagRuntimeBehavior(DRE->getBeginLoc(), DRE,
S.PDiag(diag)
- << DRE->getDecl()
- << OrigDecl->getLocation()
- << DRE->getSourceRange());
+ << DRE->getDecl() << OrigDecl->getLocation()
+ << DRE->getSourceRange());
}
};
@@ -10768,7 +10808,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeclarationName Name, QualType Type,
TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
- Expr *Init) {
+ Expr *&Init) {
bool IsInitCapture = !VDecl;
assert((!VDecl || !VDecl->isInitCapture()) &&
"init captures are expected to be deduced prior to initialization");
@@ -10822,7 +10862,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
if (DeduceInits.empty()) {
// It isn't possible to write this directly, but it is possible to
// end up in this situation with "auto x(some_pack...);"
- Diag(Init->getLocStart(), IsInitCapture
+ Diag(Init->getBeginLoc(), IsInitCapture
? diag::err_init_capture_no_expression
: diag::err_auto_var_init_no_expression)
<< VN << Type << Range;
@@ -10830,7 +10870,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
}
if (DeduceInits.size() > 1) {
- Diag(DeduceInits[1]->getLocStart(),
+ Diag(DeduceInits[1]->getBeginLoc(),
IsInitCapture ? diag::err_init_capture_multiple_expressions
: diag::err_auto_var_init_multiple_expressions)
<< VN << Type << Range;
@@ -10839,7 +10879,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
Expr *DeduceInit = DeduceInits[0];
if (DirectInit && isa<InitListExpr>(DeduceInit)) {
- Diag(Init->getLocStart(), IsInitCapture
+ Diag(Init->getBeginLoc(), IsInitCapture
? diag::err_init_capture_paren_braces
: diag::err_auto_var_init_paren_braces)
<< isa<InitListExpr>(Init) << VN << Type << Range;
@@ -10884,7 +10924,8 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
- }
+ } else
+ Init = DeduceInit;
// Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
// 'id' instead of a specific object type prevents most of our usual
@@ -10901,7 +10942,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
}
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
- Expr *Init) {
+ Expr *&Init) {
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
@@ -11144,7 +11185,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
if ((VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
VDecl->getType().isNonWeakInMRRWithObjCWeak(Context)) &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
- Init->getLocStart()))
+ Init->getBeginLoc()))
FSI->markSafeWeakUse(Init);
}
@@ -11159,9 +11200,9 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// struct T { S a, b; } t = { Temp(), Temp() }
//
// we should destroy the first Temp before constructing the second.
- ExprResult Result = ActOnFinishFullExpr(Init, VDecl->getLocation(),
- false,
- VDecl->isConstexpr());
+ ExprResult Result =
+ ActOnFinishFullExpr(Init, VDecl->getLocation(),
+ /*DiscardedValue*/ false, VDecl->isConstexpr());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
@@ -11276,9 +11317,9 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
Diag(VDecl->getLocation(),
diag::ext_in_class_initializer_float_type_cxx11)
<< DclT << Init->getSourceRange();
- Diag(VDecl->getLocStart(),
+ Diag(VDecl->getBeginLoc(),
diag::note_in_class_initializer_float_type_cxx11)
- << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+ << FixItHint::CreateInsertion(VDecl->getBeginLoc(), "constexpr ");
} else {
Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
<< DclT << Init->getSourceRange();
@@ -11293,8 +11334,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Suggest adding 'constexpr' in C++11 for literal types.
} else if (getLangOpts().CPlusPlus11 && DclT->isLiteralType(Context)) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
- << DclT << Init->getSourceRange()
- << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+ << DclT << Init->getSourceRange()
+ << FixItHint::CreateInsertion(VDecl->getBeginLoc(), "constexpr ");
VDecl->setConstexpr(true);
} else {
@@ -11407,8 +11448,9 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
+ Expr *TmpInit = nullptr;
if (Type->isUndeducedType() &&
- DeduceVariableDeclarationType(Var, false, nullptr))
+ DeduceVariableDeclarationType(Var, false, TmpInit))
return;
// C++11 [class.static.data]p3: A static data member can be declared with
@@ -11708,7 +11750,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// In Objective-C, don't allow jumps past the implicit initialization of a
// local retaining variable.
- if (getLangOpts().ObjC1 &&
+ if (getLangOpts().ObjC &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
@@ -11827,29 +11869,8 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
QualType type = var->getType();
if (type->isDependentType()) return;
- // __block variables might require us to capture a copy-initializer.
- if (var->hasAttr<BlocksAttr>()) {
- // It's currently invalid to ever have a __block variable with an
- // array type; should we diagnose that here?
-
- // Regardless, we don't want to ignore array nesting when
- // constructing this copy.
- if (type->isStructureOrClassType()) {
- EnterExpressionEvaluationContext scope(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated);
- SourceLocation poi = var->getLocation();
- Expr *varRef =new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
- ExprResult result
- = PerformMoveOrCopyInitialization(
- InitializedEntity::InitializeBlock(poi, type, false),
- var, var->getType(), varRef, /*AllowNRVO=*/true);
- if (!result.isInvalid()) {
- result = MaybeCreateExprWithCleanups(result);
- Expr *init = result.getAs<Expr>();
- Context.setBlockVarCopyInits(var, init);
- }
- }
- }
+ if (var->hasAttr<BlocksAttr>())
+ getCurFunction()->addByrefBlockVar(var);
Expr *Init = var->getInit();
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
@@ -11940,6 +11961,49 @@ static bool hasDependentAlignment(VarDecl *VD) {
return false;
}
+/// Check if VD needs to be dllexport/dllimport due to being in a
+/// dllexport/import function.
+void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
+ assert(VD->isStaticLocal());
+
+ auto *FD = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
+
+ // Find outermost function when VD is in lambda function.
+ while (FD && !getDLLAttr(FD) &&
+ !FD->hasAttr<DLLExportStaticLocalAttr>() &&
+ !FD->hasAttr<DLLImportStaticLocalAttr>()) {
+ FD = dyn_cast_or_null<FunctionDecl>(FD->getParentFunctionOrMethod());
+ }
+
+ if (!FD)
+ return;
+
+ // Static locals inherit dll attributes from their function.
+ if (Attr *A = getDLLAttr(FD)) {
+ auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+ } else if (Attr *A = FD->getAttr<DLLExportStaticLocalAttr>()) {
+ auto *NewAttr = ::new (getASTContext()) DLLExportAttr(A->getRange(),
+ getASTContext(),
+ A->getSpellingListIndex());
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+
+ // Export this function to enforce exporting this static variable even
+ // if it is not used in this compilation unit.
+ if (!FD->hasAttr<DLLExportAttr>())
+ FD->addAttr(NewAttr);
+
+ } else if (Attr *A = FD->getAttr<DLLImportStaticLocalAttr>()) {
+ auto *NewAttr = ::new (getASTContext()) DLLImportAttr(A->getRange(),
+ getASTContext(),
+ A->getSpellingListIndex());
+ NewAttr->setInherited(true);
+ VD->addAttr(NewAttr);
+ }
+}
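// Illustrative user code (hypothetical, not part of this patch) for the cases the
// walk above handles: a static local in a dllexport function inherits the DLL
// attribute, including when it is declared inside a lambda, where the loop walks
// out to the enclosing function. Assumes a Windows target (or -fms-extensions)
// where __declspec(dllexport) applies.
__declspec(dllexport) int counter() {
  static int n = 0;        // inherits dllexport from counter()
  auto bump = [] {
    static int m = 0;      // the lambda's call operator has no DLL attribute;
    return ++m;            // the walk reaches counter() and inherits from it
  };
  return ++n + bump();
}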
+
/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
@@ -11993,14 +12057,9 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
}
if (VD->isStaticLocal()) {
- if (FunctionDecl *FD =
- dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
- // Static locals inherit dll attributes from their function.
- if (Attr *A = getDLLAttr(FD)) {
- auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
- NewAttr->setInherited(true);
- VD->addAttr(NewAttr);
- }
+ CheckStaticLocalForDllExport(VD);
+
+ if (dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
// CUDA 8.0 E.3.9.4: Within the body of a __device__ or __global__
// function, only __shared__ variables or variables without any device
// memory qualifiers may be declared with static storage class.
@@ -12375,11 +12434,9 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
// Temporarily put parameter variables in the translation unit, not
// the enclosing context. This prevents them from accidentally
// looking like class members in C++.
- ParmVarDecl *New = CheckParameter(Context.getTranslationUnitDecl(),
- D.getLocStart(),
- D.getIdentifierLoc(), II,
- parmDeclType, TInfo,
- SC);
+ ParmVarDecl *New =
+ CheckParameter(Context.getTranslationUnitDecl(), D.getBeginLoc(),
+ D.getIdentifierLoc(), II, parmDeclType, TInfo, SC);
if (D.isInvalidType())
New->setInvalidDecl();
@@ -12508,7 +12565,7 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// passed by reference.
if (T->isObjCObjectType()) {
SourceLocation TypeEndLoc =
- getLocForEndOfToken(TSInfo->getTypeLoc().getLocEnd());
+ getLocForEndOfToken(TSInfo->getTypeLoc().getEndLoc());
Diag(NameLoc,
diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
<< FixItHint::CreateInsertion(TypeEndLoc, "*");
@@ -12682,6 +12739,29 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
}
}
}
+
+ if (!Definition)
+ // Similar to friend functions, a friend function template may be a
+ // definition and not have a body if it is instantiated in a class
+ // template.
+ if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) {
+ for (auto I : FTD->redecls()) {
+ auto D = cast<FunctionTemplateDecl>(I);
+ if (D != FTD) {
+ assert(!D->isThisDeclarationADefinition() &&
+ "More than one definition in redeclaration chain");
+ if (D->getFriendObjectKind() != Decl::FOK_None)
+ if (FunctionTemplateDecl *FT =
+ D->getInstantiatedFromMemberTemplate()) {
+ if (FT->isThisDeclarationADefinition()) {
+ Definition = D->getTemplatedDecl();
+ break;
+ }
+ }
+ }
+ }
+ }
+
if (!Definition)
return;
@@ -12701,6 +12781,7 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
Definition->getDescribedFunctionTemplate() ||
Definition->getNumTemplateParameterLists())) {
SkipBody->ShouldSkip = true;
+ SkipBody->Previous = const_cast<FunctionDecl*>(Definition);
if (auto *TD = Definition->getDescribedFunctionTemplate())
makeMergedDefinitionVisible(TD);
makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition));
@@ -12772,6 +12853,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
PushFunctionScope();
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
return D;
}
@@ -12782,6 +12864,11 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
else
FD = cast<FunctionDecl>(D);
+ // Do not push if it is a lambda because one is already pushed when building
+ // the lambda in ActOnStartOfLambdaDefinition().
+ if (!isLambdaCallOperator(FD))
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
@@ -12990,6 +13077,21 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
return ActOnFinishFunctionBody(D, BodyArg, false);
}
+/// RAII object that pops an ExpressionEvaluationContext when exiting a function
+/// body.
+class ExitFunctionBodyRAII {
+public:
+ ExitFunctionBodyRAII(Sema &S, bool IsLambda) : S(S), IsLambda(IsLambda) {}
+ ~ExitFunctionBodyRAII() {
+ if (!IsLambda)
+ S.PopExpressionEvaluationContext();
+ }
+
+private:
+ Sema &S;
+ bool IsLambda = false;
+};
+
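// Illustrative sketch (not part of this patch): the conditional-cleanup RAII
// pattern used by ExitFunctionBodyRAII above, shown in isolation with a
// hypothetical ContextStack type. The destructor pops on scope exit unless the
// caller indicates the pop already happens elsewhere (the IsLambda case above).
#include <vector>

struct ContextStack {
  std::vector<int> Contexts;
  void push(int C) { Contexts.push_back(C); }
  void pop() { Contexts.pop_back(); }
};

class ScopedContextPopper {
public:
  ScopedContextPopper(ContextStack &S, bool SkipPop) : S(S), SkipPop(SkipPop) {}
  ~ScopedContextPopper() {
    if (!SkipPop)
      S.pop();
  }

private:
  ContextStack &S;
  bool SkipPop;
};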
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
@@ -13000,6 +13102,11 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (getLangOpts().CoroutinesTS && getCurFunction()->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
+ // Do not call PopExpressionEvaluationContext() if it is a lambda because one
+ // is already popped when finishing the lambda in BuildLambdaExpr(). This is
+ // meant to pop the context added in ActOnStartOfFunctionDef().
+ ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
+
if (FD) {
FD->setBody(Body);
FD->setWillHaveBody(false);
@@ -13054,7 +13161,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (!FD->isInvalidDecl()) {
// Don't diagnose unused parameters of defaulted or deleted functions.
- if (!FD->isDeleted() && !FD->isDefaulted())
+ if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody())
DiagnoseUnusedParameters(FD->parameters());
DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
FD->getReturnType(), FD);
@@ -13116,7 +13223,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
if (!CmpndBody->body_empty())
- Diag(CmpndBody->body_front()->getLocStart(),
+ Diag(CmpndBody->body_front()->getBeginLoc(),
diag::warn_dispatch_body_ignored);
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
@@ -13149,7 +13256,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
if (!MD->isInvalidDecl()) {
- DiagnoseUnusedParameters(MD->parameters());
+ if (!MD->hasSkippedBody())
+ DiagnoseUnusedParameters(MD->parameters());
DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
MD->getReturnType(), MD);
@@ -13157,8 +13265,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
computeNRVO(Body, getCurFunction());
}
if (getCurFunction()->ObjCShouldCallSuper) {
- Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call)
- << MD->getSelector().getAsString();
+ Diag(MD->getEndLoc(), diag::warn_objc_missing_super_call)
+ << MD->getSelector().getAsString();
getCurFunction()->ObjCShouldCallSuper = false;
}
if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
@@ -13267,7 +13375,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (RegisterVariables)
continue;
if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
- Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
+ Diag(S->getBeginLoc(), diag::err_non_asm_stmt_in_naked_function);
Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
FD->setInvalidDecl();
break;
@@ -13354,15 +13462,17 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
}
// Extension in C99. Legal in C90, but warn about it.
- // OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
unsigned diag_id;
if (II.getName().startswith("__builtin_"))
diag_id = diag::warn_builtin_unknown;
- else if (getLangOpts().C99 || getLangOpts().OpenCL)
+ // OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
+ else if (getLangOpts().OpenCL)
+ diag_id = diag::err_opencl_implicit_function_decl;
+ else if (getLangOpts().C99)
diag_id = diag::ext_implicit_function_decl;
else
diag_id = diag::warn_implicit_function_decl;
- Diag(Loc, diag_id) << &II << getLangOpts().OpenCL;
+ Diag(Loc, diag_id) << &II;
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
@@ -13400,12 +13510,8 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
@@ -13573,11 +13679,9 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
}
// Scope manipulation handled by caller.
- TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
- D.getLocStart(),
- D.getIdentifierLoc(),
- D.getIdentifier(),
- TInfo);
+ TypedefDecl *NewTD =
+ TypedefDecl::Create(Context, CurContext, D.getBeginLoc(),
+ D.getIdentifierLoc(), D.getIdentifier(), TInfo);
// Bail out immediately if we have an invalid declaration.
if (D.isInvalidType()) {
@@ -13739,76 +13843,106 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
// struct class-key shall be used to refer to a class (clause 9)
// declared using the class or struct class-key.
TagTypeKind OldTag = Previous->getTagKind();
- if (!isDefinition || !isClassCompatTagKind(NewTag))
- if (OldTag == NewTag)
+ if (OldTag != NewTag &&
+ !(isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)))
+ return false;
+
+ // Tags are compatible, but we might still want to warn on mismatched tags.
+ // Non-class tags can't be mismatched at this point.
+ if (!isClassCompatTagKind(NewTag))
+ return true;
+
+ // Declarations for which -Wmismatched-tags is disabled are entirely ignored
+ // by our warning analysis. We don't want to warn about mismatches with (e.g.)
+ // declarations in system headers that are designed to be specialized, but if
+ // a user asks us to warn, we should warn if their code contains mismatched
+ // declarations.
+ auto IsIgnoredLoc = [&](SourceLocation Loc) {
+ return getDiagnostics().isIgnored(diag::warn_struct_class_tag_mismatch,
+ Loc);
+ };
+ if (IsIgnoredLoc(NewTagLoc))
+ return true;
+
+ auto IsIgnored = [&](const TagDecl *Tag) {
+ return IsIgnoredLoc(Tag->getLocation());
+ };
+ while (IsIgnored(Previous)) {
+ Previous = Previous->getPreviousDecl();
+ if (!Previous)
return true;
+ OldTag = Previous->getTagKind();
+ }
- if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
- // Warn about the struct/class tag mismatch.
- bool isTemplate = false;
- if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
- isTemplate = Record->getDescribedClassTemplate();
+ bool isTemplate = false;
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
+ isTemplate = Record->getDescribedClassTemplate();
- if (inTemplateInstantiation()) {
+ if (inTemplateInstantiation()) {
+ if (OldTag != NewTag) {
// In a template instantiation, do not offer fix-its for tag mismatches
// since they usually mess up the template instead of fixing the problem.
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
+ // FIXME: Note previous location?
+ }
+ return true;
+ }
+
+ if (isDefinition) {
+ // On definitions, check all previous tags and issue a fix-it for each
+ // one that doesn't match the current tag.
+ if (Previous->getDefinition()) {
+ // Don't suggest fix-its for redefinitions.
return true;
}
- if (isDefinition) {
- // On definitions, check previous tags and issue a fix-it for each
- // one that doesn't match the current tag.
- if (Previous->getDefinition()) {
- // Don't suggest fix-its for redefinitions.
- return true;
- }
+ bool previousMismatch = false;
+ for (const TagDecl *I : Previous->redecls()) {
+ if (I->getTagKind() != NewTag) {
+ // Ignore previous declarations for which the warning was disabled.
+ if (IsIgnored(I))
+ continue;
- bool previousMismatch = false;
- for (auto I : Previous->redecls()) {
- if (I->getTagKind() != NewTag) {
- if (!previousMismatch) {
- previousMismatch = true;
- Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
- << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
- << getRedeclDiagFromTagKind(I->getTagKind());
- }
- Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
- << getRedeclDiagFromTagKind(NewTag)
- << FixItHint::CreateReplacement(I->getInnerLocStart(),
- TypeWithKeyword::getTagTypeKindName(NewTag));
+ if (!previousMismatch) {
+ previousMismatch = true;
+ Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
+ << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
+ << getRedeclDiagFromTagKind(I->getTagKind());
}
+ Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(NewTag)
+ << FixItHint::CreateReplacement(I->getInnerLocStart(),
+ TypeWithKeyword::getTagTypeKindName(NewTag));
}
- return true;
- }
-
- // Check for a previous definition. If current tag and definition
- // are same type, do nothing. If no definition, but disagree with
- // with previous tag type, give a warning, but no fix-it.
- const TagDecl *Redecl = Previous->getDefinition() ?
- Previous->getDefinition() : Previous;
- if (Redecl->getTagKind() == NewTag) {
- return true;
}
+ return true;
+ }
+ // Identify the prevailing tag kind: this is the kind of the definition (if
+ // there is a non-ignored definition), or otherwise the kind of the prior
+ // (non-ignored) declaration.
+ const TagDecl *PrevDef = Previous->getDefinition();
+ if (PrevDef && IsIgnored(PrevDef))
+ PrevDef = nullptr;
+ const TagDecl *Redecl = PrevDef ? PrevDef : Previous;
+ if (Redecl->getTagKind() != NewTag) {
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
Diag(Redecl->getLocation(), diag::note_previous_use);
// If there is a previous definition, suggest a fix-it.
- if (Previous->getDefinition()) {
- Diag(NewTagLoc, diag::note_struct_class_suggestion)
- << getRedeclDiagFromTagKind(Redecl->getTagKind())
- << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
- TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
+ if (PrevDef) {
+ Diag(NewTagLoc, diag::note_struct_class_suggestion)
+ << getRedeclDiagFromTagKind(Redecl->getTagKind())
+ << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
+ TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
}
-
- return true;
}
- return false;
+
+ return true;
}
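// Illustrative user code (hypothetical, not part of this patch): the mismatched
// struct/class tags this rewrite diagnoses under -Wmismatched-tags. The prevailing
// tag kind is taken from the (non-ignored) definition, and fix-its point at the
// earlier declarations that disagree with it.
class Widget;              // declared with 'class'
struct Widget { int X; };  // defined with 'struct': warning, fix-it on the decl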
/// Add a minimal nested name specifier fixit hint to allow lookup of a tag name
@@ -14031,7 +14165,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition) {
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -14462,12 +14596,15 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// comparison.
SkipBody->CheckSameAsPrevious = true;
SkipBody->New = createTagFromNewDecl();
- SkipBody->Previous = Hidden;
+ SkipBody->Previous = Def;
+ return Def;
} else {
SkipBody->ShouldSkip = true;
+ SkipBody->Previous = Def;
makeMergedDefinitionVisible(Hidden);
+ // Carry on and handle it like a normal definition. We'll
+ // skip starting the definition later.
}
- return Def;
} else if (!IsExplicitSpecializationAfterInstantiation) {
// A redeclaration in function prototype scope in C isn't
// visible elsewhere, so merely issue a warning.
@@ -14606,7 +14743,7 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
- if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
+ if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC) &&
cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
@@ -14696,7 +14833,7 @@ CreateNewDecl:
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition) {
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -14758,7 +14895,7 @@ CreateNewDecl:
if (PrevDecl)
CheckRedeclarationModuleOwnership(New, PrevDecl);
- if (TUK == TUK_Definition)
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
@@ -14808,6 +14945,8 @@ CreateNewDecl:
if (auto RD = dyn_cast<RecordDecl>(New))
RD->completeDefinition();
return nullptr;
+ } else if (SkipBody && SkipBody->ShouldSkip) {
+ return SkipBody->Previous;
} else {
return New;
}
@@ -14868,12 +15007,11 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
// class itself; this is known as the injected-class-name. For
// purposes of access checking, the injected-class-name is treated
// as if it were a public member name.
- CXXRecordDecl *InjectedClassName
- = CXXRecordDecl::Create(Context, Record->getTagKind(), CurContext,
- Record->getLocStart(), Record->getLocation(),
- Record->getIdentifier(),
- /*PrevDecl=*/nullptr,
- /*DelayTypeCreation=*/true);
+ CXXRecordDecl *InjectedClassName = CXXRecordDecl::Create(
+ Context, Record->getTagKind(), CurContext, Record->getBeginLoc(),
+ Record->getLocation(), Record->getIdentifier(),
+ /*PrevDecl=*/nullptr,
+ /*DelayTypeCreation=*/true);
Context.getTypeDeclType(InjectedClassName, Record);
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
@@ -15080,22 +15218,6 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
}
}
- // TR 18037 does not allow fields to be declared with address spaces.
- if (T.getQualifiers().hasAddressSpace() ||
- T->isDependentAddressSpaceType() ||
- T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
- Diag(Loc, diag::err_field_with_address_space);
- D.setInvalidType();
- }
-
- // OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
- // used as structure or union field: image, sampler, event or block types.
- if (LangOpts.OpenCL && (T->isEventT() || T->isImageType() ||
- T->isSamplerT() || T->isBlockPointerType())) {
- Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
- D.setInvalidType();
- }
-
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
@@ -15140,7 +15262,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
bool Mutable
= (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
- SourceLocation TSSL = D.getLocStart();
+ SourceLocation TSSL = D.getBeginLoc();
FieldDecl *NewFD
= CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
TSSL, AS, PrevDecl, &D);
@@ -15207,12 +15329,30 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
}
}
- // OpenCL v1.2 s6.9.c: bitfields are not supported.
- if (BitWidth && getLangOpts().OpenCL) {
- Diag(Loc, diag::err_opencl_bitfields);
+ // TR 18037 does not allow fields to be declared with address spaces.
+ if (T.getQualifiers().hasAddressSpace() || T->isDependentAddressSpaceType() ||
+ T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
+ Diag(Loc, diag::err_field_with_address_space);
+ Record->setInvalidDecl();
InvalidDecl = true;
}
+ if (LangOpts.OpenCL) {
+ // OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
+ // used as structure or union field: image, sampler, event or block types.
+ if (T->isEventT() || T->isImageType() || T->isSamplerT() ||
+ T->isBlockPointerType()) {
+ Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ }
+ // OpenCL v1.2 s6.9.c: bitfields are not supported.
+ if (BitWidth) {
+ Diag(Loc, diag::err_opencl_bitfields);
+ InvalidDecl = true;
+ }
+ }
+
// Anonymous bit-fields cannot be cv-qualified (CWG 2229).
if (!InvalidDecl && getLangOpts().CPlusPlus && !II && BitWidth &&
T.hasQualifiers()) {
@@ -15602,6 +15742,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
}
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+ CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(EnclosingDecl);
// Start counting up the number of named members; make sure to include
// members of anonymous structs and unions in the total.
@@ -15691,9 +15832,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// virtual bases after the derived members. This would make a flexible
// array member declared at the end of an object not adjacent to the end
// of the type.
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record))
- if (RD->getNumVBases() != 0)
- Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
+ if (CXXRecord && CXXRecord->getNumVBases() != 0)
+ Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
<< FD->getDeclName() << Record->getTagKind();
if (!getLangOpts().C99)
Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
@@ -15784,7 +15924,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
}
ObjCFieldLifetimeErrReported = true;
}
- } else if (getLangOpts().ObjC1 &&
+ } else if (getLangOpts().ObjC &&
getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
@@ -15831,7 +15971,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Okay, we successfully defined 'Record'.
if (Record) {
bool Completed = false;
- if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ if (CXXRecord) {
if (!CXXRecord->isInvalidDecl()) {
// Set access bits correctly on the directly-declared conversions.
for (CXXRecordDecl::conversion_iterator
@@ -15841,13 +15981,6 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
}
if (!CXXRecord->isDependentType()) {
- if (CXXRecord->hasUserDeclaredDestructor()) {
- // Adjust user-defined destructor exception spec.
- if (getLangOpts().CPlusPlus11)
- AdjustDestructorExceptionSpec(CXXRecord,
- CXXRecord->getDestructor());
- }
-
// Add any implicitly-declared members to this class.
AddImplicitlyDeclaredMembersToClass(CXXRecord);
@@ -15902,7 +16035,7 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ProcessDeclAttributeList(S, Record, Attrs);
// We may have deferred checking for a deleted destructor. Check now.
- if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ if (CXXRecord) {
auto *Dtor = CXXRecord->getDestructor();
if (Dtor && Dtor->isImplicit() &&
ShouldDeleteSpecialMember(Dtor, CXXDestructor)) {
@@ -16289,8 +16422,10 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
// Verify that there isn't already something declared with this name in this
// scope.
- NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ LookupResult R(*this, Id, IdLoc, LookupOrdinaryName, ForVisibleRedeclaration);
+ LookupName(R, S);
+ NamedDecl *PrevDecl = R.getAsSingle<NamedDecl>();
+
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
@@ -16313,6 +16448,11 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
return nullptr;
if (PrevDecl) {
+ if (!TheEnumDecl->isScoped() && isa<ValueDecl>(PrevDecl)) {
+ // Check for other kinds of shadowing not already handled.
+ CheckShadow(New, PrevDecl, R);
+ }
+
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
@@ -16399,7 +16539,7 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
- typedef llvm::DenseMap<int64_t, DeclOrVector> ValueToVectorMap;
+ typedef std::unordered_map<int64_t, DeclOrVector> ValueToVectorMap;
// Use int64_t as a key to avoid needing special handling for DenseMap keys.
auto EnumConstantToKey = [](const EnumConstantDecl *D) {
@@ -16570,7 +16710,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
- // Keep track of whether every enum element has type int (very commmon).
+ // Keep track of whether every enum element has type int (very common).
if (AllElementsInt)
AllElementsInt = ECD->getType() == Context.IntTy;
}
@@ -16776,7 +16916,7 @@ static void checkModuleImportContext(Sema &S, Module *M,
switch (LSD->getLanguage()) {
case LinkageSpecDecl::lang_c:
if (ExternCLoc.isInvalid())
- ExternCLoc = LSD->getLocStart();
+ ExternCLoc = LSD->getBeginLoc();
break;
case LinkageSpecDecl::lang_cxx:
break;
@@ -16792,8 +16932,9 @@ static void checkModuleImportContext(Sema &S, Module *M,
? diag::ext_module_import_not_at_top_level_noop
: diag::err_module_import_not_at_top_level_fatal)
<< M->getFullModuleName() << DC;
- S.Diag(cast<Decl>(DC)->getLocStart(),
- diag::note_module_import_not_at_top_level) << DC;
+ S.Diag(cast<Decl>(DC)->getBeginLoc(),
+ diag::note_module_import_not_at_top_level)
+ << DC;
} else if (!M->IsExternC && ExternCLoc.isValid()) {
S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
<< M->getFullModuleName();
@@ -16830,6 +16971,10 @@ Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
case LangOptions::CMK_ModuleMap:
Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
return nullptr;
+
+ case LangOptions::CMK_HeaderModule:
+ Diag(ModuleLoc, diag::err_module_decl_in_header_module);
+ return nullptr;
}
assert(ModuleScopes.size() == 1 && "expected to be at global module scope");
@@ -16898,7 +17043,8 @@ Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
case ModuleDeclKind::Implementation:
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
PP.getIdentifierInfo(ModuleName), Path[0].second);
- Mod = getModuleLoader().loadModule(ModuleLoc, Path, Module::AllVisible,
+ Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
+ Module::AllVisible,
/*IsIncludeDirective=*/false);
if (!Mod) {
Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
@@ -16928,6 +17074,19 @@ Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ImportLoc,
ModuleIdPath Path) {
+ // Flatten the module path for a Modules TS module name.
+ std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc;
+ if (getLangOpts().ModulesTS) {
+ std::string ModuleName;
+ for (auto &Piece : Path) {
+ if (!ModuleName.empty())
+ ModuleName += ".";
+ ModuleName += Piece.first->getName();
+ }
+ ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
+ Path = ModuleIdPath(ModuleNameLoc);
+ }
+
Module *Mod =
getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
/*IsIncludeDirective=*/false);
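// Illustrative sketch (not part of this patch): the Modules TS path flattening
// above in isolation, joining the dotted module-name pieces into one identifier.
// Hypothetical signature; the real code carries IdentifierInfo/SourceLocation
// pairs rather than plain strings.
#include <string>
#include <vector>

static std::string flattenModulePath(const std::vector<std::string> &Path) {
  std::string ModuleName;
  for (const std::string &Piece : Path) {
    if (!ModuleName.empty())
      ModuleName += ".";
    ModuleName += Piece;
  }
  return ModuleName;
}
// flattenModulePath({"std", "core", "io"}) == "std.core.io"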
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 77deed6047f4..0e10804a2ec7 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -93,6 +93,17 @@ static unsigned getFunctionOrMethodNumParams(const Decl *D) {
return cast<ObjCMethodDecl>(D)->param_size();
}
+static const ParmVarDecl *getFunctionOrMethodParam(const Decl *D,
+ unsigned Idx) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getParamDecl(Idx);
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getParamDecl(Idx);
+ if (const auto *BD = dyn_cast<BlockDecl>(D))
+ return BD->getParamDecl(Idx);
+ return nullptr;
+}
+
static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
if (const FunctionType *FnTy = D->getFunctionType())
return cast<FunctionProtoType>(FnTy)->getParamType(Idx);
@@ -103,12 +114,8 @@ static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) {
}
static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) {
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- return FD->getParamDecl(Idx)->getSourceRange();
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
- return MD->parameters()[Idx]->getSourceRange();
- if (const auto *BD = dyn_cast<BlockDecl>(D))
- return BD->getParamDecl(Idx)->getSourceRange();
+ if (auto *PVD = getFunctionOrMethodParam(D, Idx))
+ return PVD->getSourceRange();
return SourceRange();
}
@@ -182,7 +189,7 @@ static bool checkAttributeNumArgsImpl(Sema &S, const ParsedAttr &AL,
unsigned Num, unsigned Diag,
Compare Comp) {
if (Comp(getNumAttributeArgs(AL), Num)) {
- S.Diag(AL.getLoc(), Diag) << AL.getName() << Num;
+ S.Diag(AL.getLoc(), Diag) << AL << Num;
return false;
}
@@ -225,34 +232,25 @@ getAttrLoc(const AttrInfo &AL) {
}
static SourceLocation getAttrLoc(const ParsedAttr &AL) { return AL.getLoc(); }
-/// A helper function to provide Attribute Name for the Attr types
-/// AND the ParsedAttr.
-template <typename AttrInfo>
-static typename std::enable_if<std::is_base_of<Attr, AttrInfo>::value,
- const AttrInfo *>::type
-getAttrName(const AttrInfo &AL) {
- return &AL;
-}
-static const IdentifierInfo *getAttrName(const ParsedAttr &AL) {
- return AL.getName();
-}
-
/// If Expr is a valid integer constant, get the value of the integer
/// expression and return success or failure. May output an error.
+///
+/// A negative argument is implicitly converted to unsigned, unless
+/// \p StrictlyUnsigned is true.
template <typename AttrInfo>
static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
- uint32_t &Val, unsigned Idx = UINT_MAX) {
+ uint32_t &Val, unsigned Idx = UINT_MAX,
+ bool StrictlyUnsigned = false) {
llvm::APSInt I(32);
if (Expr->isTypeDependent() || Expr->isValueDependent() ||
!Expr->isIntegerConstantExpr(I, S.Context)) {
if (Idx != UINT_MAX)
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << getAttrName(AI) << Idx << AANT_ArgumentIntegerConstant
- << Expr->getSourceRange();
+ << AI << Idx << AANT_ArgumentIntegerConstant
+ << Expr->getSourceRange();
else
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_type)
- << getAttrName(AI) << AANT_ArgumentIntegerConstant
- << Expr->getSourceRange();
+ << AI << AANT_ArgumentIntegerConstant << Expr->getSourceRange();
return false;
}
@@ -262,6 +260,12 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
return false;
}
+ if (StrictlyUnsigned && I.isSigned() && I.isNegative()) {
+ S.Diag(getAttrLoc(AI), diag::err_attribute_requires_positive_integer)
+ << AI << /*non-negative*/ 1;
+ return false;
+ }
+
Val = (uint32_t)I.getZExtValue();
return true;
}
@@ -291,10 +295,19 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Ex
/// Diagnose mutually exclusive attributes when present on a given
/// declaration. Returns true if diagnosed.
template <typename AttrTy>
-static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
- IdentifierInfo *Ident) {
+static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(Range.getBegin(), diag::err_attributes_are_not_compatible) << Ident
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << A;
+ S.Diag(A->getLocation(), diag::note_conflicting_attribute);
+ return true;
+ }
+ return false;
+}
+
+template <typename AttrTy>
+static bool checkAttrMutualExclusion(Sema &S, Decl *D, const Attr &AL) {
+ if (const auto *A = D->getAttr<AttrTy>()) {
+ S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible) << &AL
<< A;
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
@@ -324,22 +337,21 @@ static bool checkFunctionOrMethodParameterIndex(
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
!IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
- << getAttrName(AI) << AttrArgNum << AANT_ArgumentIntegerConstant
- << IdxExpr->getSourceRange();
+ << &AI << AttrArgNum << AANT_ArgumentIntegerConstant
+ << IdxExpr->getSourceRange();
return false;
}
unsigned IdxSource = IdxInt.getLimitedValue(UINT_MAX);
if (IdxSource < 1 || (!IV && IdxSource > NumParams)) {
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_out_of_bounds)
- << getAttrName(AI) << AttrArgNum << IdxExpr->getSourceRange();
+ << &AI << AttrArgNum << IdxExpr->getSourceRange();
return false;
}
if (HasImplicitThisParam && !CanIndexImplicitThis) {
if (IdxSource == 1) {
- S.Diag(getAttrLoc(AI),
- diag::err_attribute_invalid_implicit_this_argument)
- << getAttrName(AI) << IdxExpr->getSourceRange();
+ S.Diag(getAttrLoc(AI), diag::err_attribute_invalid_implicit_this_argument)
+ << &AI << IdxExpr->getSourceRange();
return false;
}
}
@@ -359,7 +371,7 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
if (AL.isArgIdent(ArgNum)) {
IdentifierLoc *Loc = AL.getArgAsIdent(ArgNum);
Diag(Loc->Loc, diag::err_attribute_argument_type)
- << AL.getName() << AANT_ArgumentString
+ << AL << AANT_ArgumentString
<< FixItHint::CreateInsertion(Loc->Loc, "\"")
<< FixItHint::CreateInsertion(getLocForEndOfToken(Loc->Loc), "\"");
Str = Loc->Ident->getName();
@@ -372,11 +384,11 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
Expr *ArgExpr = AL.getArgAsExpr(ArgNum);
const auto *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
if (ArgLocation)
- *ArgLocation = ArgExpr->getLocStart();
+ *ArgLocation = ArgExpr->getBeginLoc();
if (!Literal || !Literal->isAscii()) {
- Diag(ArgExpr->getLocStart(), diag::err_attribute_argument_type)
- << AL.getName() << AANT_ArgumentString;
+ Diag(ArgExpr->getBeginLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentString;
return false;
}
@@ -387,9 +399,59 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
/// Applies the given attribute to the Decl without performing any
/// additional semantic checking.
template <typename AttrType>
+static void handleSimpleAttribute(Sema &S, Decl *D, SourceRange SR,
+ unsigned SpellingIndex) {
+ D->addAttr(::new (S.Context) AttrType(SR, S.Context, SpellingIndex));
+}
+
+template <typename AttrType>
static void handleSimpleAttribute(Sema &S, Decl *D, const ParsedAttr &AL) {
- D->addAttr(::new (S.Context) AttrType(AL.getRange(), S.Context,
- AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<AttrType>(S, D, AL.getRange(),
+ AL.getAttributeSpellingListIndex());
+}
+
+
+template <typename... DiagnosticArgs>
+static const Sema::SemaDiagnosticBuilder&
+appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr) {
+ return Bldr;
+}
+
+template <typename T, typename... DiagnosticArgs>
+static const Sema::SemaDiagnosticBuilder&
+appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr, T &&ExtraArg,
+ DiagnosticArgs &&... ExtraArgs) {
+ return appendDiagnostics(Bldr << std::forward<T>(ExtraArg),
+ std::forward<DiagnosticArgs>(ExtraArgs)...);
+}
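// Illustrative sketch (not part of this patch): the recursive parameter-pack
// forwarding used by appendDiagnostics above, with a plain std::ostream standing
// in for the SemaDiagnosticBuilder (hypothetical stand-in, for illustration only).
#include <iostream>
#include <utility>

static std::ostream &appendAll(std::ostream &OS) { return OS; }

template <typename T, typename... Rest>
static std::ostream &appendAll(std::ostream &OS, T &&First, Rest &&... Args) {
  return appendAll(OS << std::forward<T>(First), std::forward<Rest>(Args)...);
}

int main() {
  appendAll(std::cout, "attribute takes ", 1, " argument, got ", 3, '\n');
}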
+
+/// Add an attribute {@code AttrType} to declaration {@code D}, provided that
+/// {@code PassesCheck} is true.
+/// Otherwise, emit diagnostic {@code DiagID}, passing in all parameters
+/// specified in {@code ExtraArgs}.
+template <typename AttrType, typename... DiagnosticArgs>
+static void
+handleSimpleAttributeOrDiagnose(Sema &S, Decl *D, SourceRange SR,
+ unsigned SpellingIndex,
+ bool PassesCheck,
+ unsigned DiagID, DiagnosticArgs&&... ExtraArgs) {
+ if (!PassesCheck) {
+ Sema::SemaDiagnosticBuilder DB = S.Diag(D->getBeginLoc(), DiagID);
+ appendDiagnostics(DB, std::forward<DiagnosticArgs>(ExtraArgs)...);
+ return;
+ }
+ handleSimpleAttribute<AttrType>(S, D, SR, SpellingIndex);
+}
+
+template <typename AttrType, typename... DiagnosticArgs>
+static void
+handleSimpleAttributeOrDiagnose(Sema &S, Decl *D, const ParsedAttr &AL,
+ bool PassesCheck,
+ unsigned DiagID,
+ DiagnosticArgs&&... ExtraArgs) {
+ return handleSimpleAttributeOrDiagnose<AttrType>(
+ S, D, AL.getRange(), AL.getAttributeSpellingListIndex(), PassesCheck,
+ DiagID, std::forward<DiagnosticArgs>(ExtraArgs)...);
}
template <typename AttrType>
@@ -404,8 +466,7 @@ template <typename AttrType, typename IncompatibleAttrType,
typename... IncompatibleAttrTypes>
static void handleSimpleAttributeWithExclusions(Sema &S, Decl *D,
const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<IncompatibleAttrType>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<IncompatibleAttrType>(S, D, AL))
return;
handleSimpleAttributeWithExclusions<AttrType, IncompatibleAttrTypes...>(S, D,
AL);
@@ -421,17 +482,36 @@ static bool isIntOrBool(Expr *Exp) {
// Check to see if the type is a smart pointer of some kind. We assume
// it's a smart pointer if it defines both operator-> and operator*.
static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
- DeclContextLookupResult Res1 = RT->getDecl()->lookup(
- S.Context.DeclarationNames.getCXXOperatorName(OO_Star));
- if (Res1.empty())
- return false;
+ auto IsOverloadedOperatorPresent = [&S](const RecordDecl *Record,
+ OverloadedOperatorKind Op) {
+ DeclContextLookupResult Result =
+ Record->lookup(S.Context.DeclarationNames.getCXXOperatorName(Op));
+ return !Result.empty();
+ };
+
+ const RecordDecl *Record = RT->getDecl();
+ bool foundStarOperator = IsOverloadedOperatorPresent(Record, OO_Star);
+ bool foundArrowOperator = IsOverloadedOperatorPresent(Record, OO_Arrow);
+ if (foundStarOperator && foundArrowOperator)
+ return true;
- DeclContextLookupResult Res2 = RT->getDecl()->lookup(
- S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow));
- if (Res2.empty())
+ const CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record);
+ if (!CXXRecord)
return false;
- return true;
+ for (auto BaseSpecifier : CXXRecord->bases()) {
+ if (!foundStarOperator)
+ foundStarOperator = IsOverloadedOperatorPresent(
+ BaseSpecifier.getType()->getAsRecordDecl(), OO_Star);
+ if (!foundArrowOperator)
+ foundArrowOperator = IsOverloadedOperatorPresent(
+ BaseSpecifier.getType()->getAsRecordDecl(), OO_Arrow);
+ }
+
+ if (foundStarOperator && foundArrowOperator)
+ return true;
+
+ return false;
}
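// Illustrative sketch (not part of this patch): the same "smart pointer" criterion
// (the type provides both operator* and operator->) expressed as a compile-time
// trait via the detection idiom. Hypothetical helper; the Sema code above performs
// the equivalent name lookup on the AST, including base classes.
#include <memory>
#include <type_traits>
#include <utility>

template <typename T, typename = void>
struct HasStarAndArrow : std::false_type {};

template <typename T>
struct HasStarAndArrow<
    T, std::void_t<decltype(*std::declval<T &>()),
                   decltype(std::declval<T &>().operator->())>>
    : std::true_type {};

static_assert(HasStarAndArrow<std::shared_ptr<int>>::value, "smart pointer");
static_assert(!HasStarAndArrow<int>::value, "plain int is not a smart pointer");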
/// Check if passed in Decl is a pointer type.
@@ -455,8 +535,7 @@ static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
return true;
}
- S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
- << AL.getName() << QT;
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_pointer) << AL << QT;
return false;
}
@@ -473,6 +552,29 @@ static const RecordType *getRecordType(QualType QT) {
return nullptr;
}
+template <typename AttrType>
+static bool checkRecordDeclForAttr(const RecordDecl *RD) {
+ // Check if the record itself has the attribute.
+ if (RD->hasAttr<AttrType>())
+ return true;
+
+ // Else check if any base classes have the attribute.
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ CXXBasePaths BPaths(false, false);
+ if (CRD->lookupInBases(
+ [](const CXXBaseSpecifier *BS, CXXBasePath &) {
+ const auto &Ty = *BS->getType();
+ // If it's type-dependent, we assume it could have the attribute.
+ if (Ty.isDependentType())
+ return true;
+ return Ty.getAs<RecordType>()->getDecl()->hasAttr<AttrType>();
+ },
+ BPaths, true))
+ return true;
+ }
+ return false;
+}
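// Illustrative user code (hypothetical, not part of this patch) for the base-class
// lookup that checkRecordDeclForAttr performs: a type deriving from a
// capability-annotated base is treated as a capability too (clang -Wthread-safety).
struct __attribute__((capability("mutex"))) BasicMutex {
  void lock() __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
};

struct LoggingMutex : BasicMutex {};  // capability found through the base

LoggingMutex M;
int Counter __attribute__((guarded_by(M)));  // accepted: M's type has a capability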
+
static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
const RecordType *RT = getRecordType(Ty);
@@ -488,21 +590,7 @@ static bool checkRecordTypeForCapability(Sema &S, QualType Ty) {
if (threadSafetyCheckIsSmartPointer(S, RT))
return true;
- // Check if the record itself has a capability.
- RecordDecl *RD = RT->getDecl();
- if (RD->hasAttr<CapabilityAttr>())
- return true;
-
- // Else check if any base classes have a capability.
- if (const auto *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- CXXBasePaths BPaths(false, false);
- if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &) {
- const auto *Type = BS->getType()->getAs<RecordType>();
- return Type->getDecl()->hasAttr<CapabilityAttr>();
- }, BPaths))
- return true;
- }
- return false;
+ return checkRecordDeclForAttr<CapabilityAttr>(RT->getDecl());
}
static bool checkTypedefTypeForCapability(QualType Ty) {
@@ -560,8 +648,27 @@ static bool isCapabilityExpr(Sema &S, const Expr *Ex) {
static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args,
- int Sidx = 0,
+ unsigned Sidx = 0,
bool ParamIdxOk = false) {
+ if (Sidx == AL.getNumArgs()) {
+ // If we don't have any capability arguments, the attribute implicitly
+ // refers to 'this'. So we need to make sure that 'this' exists, i.e. we're
+ // in a non-static method, and that the class is a (scoped) capability.
+ const auto *MD = dyn_cast<const CXXMethodDecl>(D);
+ if (MD && !MD->isStatic()) {
+ const CXXRecordDecl *RD = MD->getParent();
+ // FIXME -- need to check this again on template instantiation
+ if (!checkRecordDeclForAttr<CapabilityAttr>(RD) &&
+ !checkRecordDeclForAttr<ScopedLockableAttr>(RD))
+ S.Diag(AL.getLoc(),
+ diag::warn_thread_attribute_not_on_capability_member)
+ << AL << MD->getParent();
+ } else {
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_not_on_non_static_member)
+ << AL;
+ }
+ }
+
for (unsigned Idx = Sidx; Idx < AL.getNumArgs(); ++Idx) {
Expr *ArgExp = AL.getArgAsExpr(Idx);
@@ -582,7 +689,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// We allow constant strings to be used as a placeholder for expressions
// that are not valid C++ syntax, but warn that they are ignored.
- S.Diag(AL.getLoc(), diag::warn_thread_attribute_ignored) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_ignored) << AL;
Args.push_back(ArgExp);
continue;
}
@@ -611,7 +718,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
uint64_t ParamIdxFromZero = ParamIdxFromOne - 1;
if (!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_range)
- << AL.getName() << Idx + 1 << NumParams;
+ << AL << Idx + 1 << NumParams;
continue;
}
ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
@@ -624,7 +731,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
// boolean logic expression. Eg) requires_capability(A || B && !C)
if (!typeHasCapability(S, ArgTy) && !isCapabilityExpr(S, ArgExp))
S.Diag(AL.getLoc(), diag::warn_thread_attribute_argument_not_lockable)
- << AL.getName() << ArgTy;
+ << AL << ArgTy;
Args.push_back(ArgExp);
}
@@ -686,8 +793,7 @@ static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
// Check that this attribute only applies to lockable types.
QualType QT = cast<ValueDecl>(D)->getType();
if (!QT->isDependentType() && !typeHasCapability(S, QT)) {
- S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_lockable)
- << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_thread_attribute_decl_not_lockable) << AL;
return false;
}
@@ -772,9 +878,9 @@ static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
const ParmVarDecl *Param = FD->getParamDecl(Idx.getASTIndex());
if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
- SourceLocation SrcLoc = AttrArg->getLocStart();
+ SourceLocation SrcLoc = AttrArg->getBeginLoc();
S.Diag(SrcLoc, diag::err_attribute_integers_only)
- << getAttrName(AI) << Param->getSourceRange();
+ << AI << Param->getSourceRange();
return false;
}
return true;
@@ -787,8 +893,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const auto *FD = cast<FunctionDecl>(D);
if (!FD->getReturnType()->isPointerType()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
- << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only) << AL;
return;
}
@@ -825,7 +930,7 @@ static bool checkTryLockFunAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
if (!isIntOrBool(AL.getArgAsExpr(0))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIntOrBool;
+ << AL << 1 << AANT_ArgumentIntOrBool;
return false;
}
@@ -907,8 +1012,7 @@ static bool checkFunctionConditionAttr(Sema &S, Decl *D, const ParsedAttr &AL,
if (isa<FunctionDecl>(D) && !Cond->isValueDependent() &&
!Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
Diags)) {
- S.Diag(AL.getLoc(), diag::err_attr_cond_never_constant_expr)
- << AL.getName();
+ S.Diag(AL.getLoc(), diag::err_attr_cond_never_constant_expr) << AL;
for (const PartialDiagnosticAt &PDiag : Diags)
S.Diag(PDiag.first, PDiag.second);
return false;
@@ -987,7 +1091,7 @@ static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
DiagnoseIfAttr::DiagnosticType DiagType;
if (!DiagnoseIfAttr::ConvertStrToDiagnosticType(DiagTypeStr, DiagType)) {
- S.Diag(AL.getArgAsExpr(2)->getLocStart(),
+ S.Diag(AL.getArgAsExpr(2)->getBeginLoc(),
diag::err_diagnose_if_invalid_diagnostic_type);
return;
}
@@ -1002,8 +1106,7 @@ static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->hasAttr<PassObjectSizeAttr>()) {
- S.Diag(D->getLocStart(), diag::err_attribute_only_once_per_parameter)
- << AL.getName();
+ S.Diag(D->getBeginLoc(), diag::err_attribute_only_once_per_parameter) << AL;
return;
}
@@ -1016,8 +1119,8 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// __builtin_object_size. So, it has the same constraints as that second
// argument; namely, it must be in the range [0, 3].
if (Type > 3) {
- S.Diag(E->getLocStart(), diag::err_attribute_argument_outof_range)
- << AL.getName() << 0 << 3 << E->getSourceRange();
+ S.Diag(E->getBeginLoc(), diag::err_attribute_argument_outof_range)
+ << AL << 0 << 3 << E->getSourceRange();
return;
}
@@ -1026,8 +1129,7 @@ static void handlePassObjectSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// At this point, we have no clue if `D` belongs to a function declaration or
// definition, so we defer the constness check until later.
if (!cast<ParmVarDecl>(D)->getType()->isPointerType()) {
- S.Diag(D->getLocStart(), diag::err_attribute_pointers_only)
- << AL.getName() << 1;
+ S.Diag(D->getBeginLoc(), diag::err_attribute_pointers_only) << AL << 1;
return;
}
@@ -1042,13 +1144,13 @@ static void handleConsumableAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *IL = AL.getArgAsIdent(0);
if (!ConsumableAttr::ConvertStrToConsumedState(IL->Ident->getName(),
DefaultState)) {
- S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << IL->Ident;
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << AL
+ << IL->Ident;
return;
}
} else {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL.getName() << AANT_ArgumentIdentifier;
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1059,8 +1161,7 @@ static void handleConsumableAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
const ParsedAttr &AL) {
- ASTContext &CurrContext = S.getASTContext();
- QualType ThisType = MD->getThisType(CurrContext)->getPointeeType();
+ QualType ThisType = MD->getThisType()->getPointeeType();
if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
if (!RD->hasAttr<ConsumableAttr>()) {
@@ -1098,8 +1199,7 @@ static void handleCallableWhenAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!CallableWhenAttr::ConvertStrToConsumedState(StateString,
CallableState)) {
- S.Diag(Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << StateString;
+ S.Diag(Loc, diag::warn_attribute_type_not_supported) << AL << StateString;
return;
}
@@ -1121,12 +1221,12 @@ static void handleParamTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!ParamTypestateAttr::ConvertStrToConsumedState(StateString,
ParamState)) {
S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << StateString;
+ << AL << StateString;
return;
}
} else {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
- AL.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1154,13 +1254,13 @@ static void handleReturnTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *IL = AL.getArgAsIdent(0);
if (!ReturnTypestateAttr::ConvertStrToConsumedState(IL->Ident->getName(),
ReturnState)) {
- S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << IL->Ident;
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << AL
+ << IL->Ident;
return;
}
} else {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
- AL.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1174,7 +1274,7 @@ static void handleReturnTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
//
//} else if (const CXXConstructorDecl *Constructor =
// dyn_cast<CXXConstructorDecl>(D)) {
- // ReturnType = Constructor->getThisType(S.getASTContext())->getPointeeType();
+ // ReturnType = Constructor->getThisType()->getPointeeType();
//
//} else {
//
@@ -1203,13 +1303,13 @@ static void handleSetTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *Ident = AL.getArgAsIdent(0);
StringRef Param = Ident->Ident->getName();
if (!SetTypestateAttr::ConvertStrToConsumedState(Param, NewState)) {
- S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << Param;
+ S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported) << AL
+ << Param;
return;
}
} else {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
- AL.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1227,13 +1327,13 @@ static void handleTestTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *Ident = AL.getArgAsIdent(0);
StringRef Param = Ident->Ident->getName();
if (!TestTypestateAttr::ConvertStrToConsumedState(Param, TestState)) {
- S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << Param;
+ S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported) << AL
+ << Param;
return;
}
} else {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) <<
- AL.getName() << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1261,7 +1361,7 @@ static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (BitfieldByteAligned)
// The PS4 target needs to maintain ABI backwards compatibility.
S.Diag(AL.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
- << AL.getName() << FD->getType();
+ << AL << FD->getType();
else
FD->addAttr(::new (S.Context) PackedAttr(
AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
@@ -1275,7 +1375,7 @@ static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
} else
- S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL;
}
static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -1285,19 +1385,19 @@ static bool checkIBOutletCommon(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *VD = dyn_cast<ObjCIvarDecl>(D)) {
if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
- << AL.getName() << VD->getType() << 0;
+ << AL << VD->getType() << 0;
return false;
}
}
else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
S.Diag(AL.getLoc(), diag::warn_iboutlet_object_type)
- << AL.getName() << PD->getType() << 1;
+ << AL << PD->getType() << 1;
return false;
}
}
else {
- S.Diag(AL.getLoc(), diag::warn_attribute_iboutlet) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_iboutlet) << AL;
return false;
}
@@ -1317,8 +1417,7 @@ static void handleIBOutletCollection(Sema &S, Decl *D, const ParsedAttr &AL) {
// The iboutletcollection attribute can have zero or one arguments.
if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
return;
}
@@ -1390,10 +1489,10 @@ static bool attrNonNullArgCheck(Sema &S, QualType T, const ParsedAttr &AL,
if (!S.isValidPointerAttrType(T)) {
if (isReturnValue)
S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
- << AL.getName() << AttrParmRange << TypeRange;
+ << AL << AttrParmRange << TypeRange;
else
S.Diag(AL.getLoc(), diag::warn_attribute_pointers_only)
- << AL.getName() << AttrParmRange << TypeRange << 0;
+ << AL << AttrParmRange << TypeRange << 0;
return false;
}
return true;
@@ -1486,7 +1585,7 @@ static void handleNoEscapeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
QualType T = cast<ParmVarDecl>(D)->getType();
if (!S.isValidPointerAttrType(T, /* RefOkay */ true)) {
S.Diag(AL.getLoc(), diag::warn_attribute_pointers_only)
- << AL.getName() << AL.getRange() << 0;
+ << AL << AL.getRange() << 0;
return;
}
@@ -1579,7 +1678,7 @@ void Sema::AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
QualType Ty = getFunctionOrMethodParamType(D, Idx.getASTIndex());
if (!Ty->isDependentType() && !Ty->isIntegralType(Context)) {
- Diag(ParamExpr->getLocStart(), diag::err_attribute_integers_only)
+ Diag(ParamExpr->getBeginLoc(), diag::err_attribute_integers_only)
<< &TmpAttr
<< FuncDecl->getParamDecl(Idx.getASTIndex())->getSourceRange();
return;
@@ -1611,7 +1710,7 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ << AL << 1 << AANT_ArgumentIdentifier;
return;
}
@@ -1625,15 +1724,13 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case OwnershipAttr::Takes:
case OwnershipAttr::Holds:
if (AL.getNumArgs() < 2) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments)
- << AL.getName() << 2;
+ S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments) << AL << 2;
return;
}
break;
case OwnershipAttr::Returns:
if (AL.getNumArgs() > 2) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
return;
}
break;
@@ -1668,8 +1765,8 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
break;
}
if (-1 != Err) {
- S.Diag(AL.getLoc(), diag::err_ownership_type) << AL.getName() << Err
- << Ex->getSourceRange();
+ S.Diag(AL.getLoc(), diag::err_ownership_type) << AL << Err
+ << Ex->getSourceRange();
return;
}
@@ -1679,8 +1776,7 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// index.
if (I->getOwnKind() != K && I->args_end() !=
std::find(I->args_begin(), I->args_end(), Idx)) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL.getName() << I;
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << I;
return;
} else if (K == OwnershipAttr::Returns &&
I->getOwnKind() == OwnershipAttr::Returns) {
@@ -1710,8 +1806,7 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleWeakRefAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check the attribute arguments.
if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
return;
}
@@ -1812,7 +1907,16 @@ static void handleAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- // FIXME: check if target symbol exists in current file
+ // Mark target used to prevent unneeded-internal-declaration warnings.
+ if (!S.LangOpts.CPlusPlus) {
+ // FIXME: demangle Str for C++, as the attribute refers to the mangled
+ // linkage name, not the pre-mangled identifier.
+ const DeclarationNameInfo target(&S.Context.Idents.get(Str), AL.getLoc());
+ LookupResult LR(S, target, Sema::LookupOrdinaryName);
+ if (S.LookupQualifiedName(LR, S.getCurLexicalContext()))
+ for (NamedDecl *ND : LR)
+ ND->markUsed(S.Context);
+ }
D->addAttr(::new (S.Context) AliasAttr(AL.getRange(), S.Context, Str,
AL.getAttributeSpellingListIndex()));
@@ -1846,11 +1950,19 @@ static void handleRestrictAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only)
- << AL.getName() << getFunctionOrMethodResultSourceRange(D);
+ << AL << getFunctionOrMethodResultSourceRange(D);
}
static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
FunctionDecl *FD = cast<FunctionDecl>(D);
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->getParent()->isLambda()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_dll_lambda) << AL;
+ return;
+ }
+ }
+
if (!checkAttributeAtLeastNumArgs(S, AL, 1))
return;
@@ -1858,7 +1970,7 @@ static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
for (unsigned ArgNo = 0; ArgNo < getNumAttributeArgs(AL); ++ArgNo) {
if (!AL.isArgIdent(ArgNo)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL.getName() << AANT_ArgumentIdentifier;
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -1896,18 +2008,16 @@ static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
- << AL.getName() << AttributeLangSupport::Cpp;
+ << AL << AttributeLangSupport::Cpp;
return;
}
- if (CommonAttr *CA = S.mergeCommonAttr(D, AL.getRange(), AL.getName(),
- AL.getAttributeSpellingListIndex()))
+ if (CommonAttr *CA = S.mergeCommonAttr(D, AL))
D->addAttr(CA);
}
static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL))
return;
if (AL.isDeclspecAttribute()) {
@@ -1916,7 +2026,7 @@ static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (Arch != llvm::Triple::x86 &&
(Arch != llvm::Triple::arm && Arch != llvm::Triple::thumb)) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_on_arch)
- << AL.getName() << Triple.getArchName();
+ << AL << Triple.getArchName();
return;
}
}
@@ -1930,7 +2040,7 @@ static void handleNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(Attrs.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attrs.getName() << ExpectedFunctionOrMethod;
+ << Attrs << ExpectedFunctionOrMethod;
return;
}
@@ -1957,7 +2067,7 @@ bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL.getName();
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored) << AL;
AL.setInvalid();
return true;
}
@@ -1973,10 +2083,10 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || (!VD->getType()->isBlockPointerType() &&
!VD->getType()->isFunctionPointerType())) {
- S.Diag(AL.getLoc(),
- AL.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type
- : diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionMethodOrBlock;
+ S.Diag(AL.getLoc(), AL.isCXX11Attribute()
+ ? diag::err_attribute_wrong_decl_type
+ : diag::warn_attribute_wrong_decl_type)
+ << AL << ExpectedFunctionMethodOrBlock;
return;
}
}
@@ -2065,7 +2175,7 @@ static void handleUnusedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// If this is spelled as the standard C++17 attribute, but not in C++17, warn
// about using it as an extension.
if (!S.getLangOpts().CPlusPlus17 && IsCXX17Attr)
- S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL.getName();
+ S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL;
D->addAttr(::new (S.Context) UnusedAttr(
AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
@@ -2108,7 +2218,7 @@ static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) {
S.Diag(AL.getLoc(), diag::err_objc_attr_protocol_requires_definition)
- << AL.getName() << AL.getRange();
+ << AL << AL.getRange();
return;
}
@@ -2365,6 +2475,15 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getReplacementExpr()))
Replacement = SE->getString();
+ if (II->isStr("swift")) {
+ if (Introduced.isValid() || Obsoleted.isValid() ||
+ (!IsUnavailable && !Deprecated.isValid())) {
+ S.Diag(AL.getLoc(),
+ diag::warn_availability_swift_unavailable_deprecated_only);
+ return;
+ }
+ }
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, AL.getRange(), II,
false/*Implicit*/,
Introduced.Version,
@@ -2506,8 +2625,7 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
bool isTypeVisibility) {
// Visibility attributes don't mean anything on a typedef.
if (isa<TypedefNameDecl>(D)) {
- S.Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored)
- << AL.getName();
+ S.Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored) << AL;
return;
}
@@ -2517,7 +2635,7 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
isa<ObjCInterfaceDecl>(D) ||
isa<NamespaceDecl>(D))) {
S.Diag(AL.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
- << AL.getName() << ExpectedTypeOrNamespace;
+ << AL << ExpectedTypeOrNamespace;
return;
}
@@ -2529,8 +2647,8 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
VisibilityAttr::VisibilityType type;
if (!VisibilityAttr::ConvertStrToVisibilityType(TypeStr, type)) {
- S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
- << AL.getName() << TypeStr;
+ S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported) << AL
+ << TypeStr;
return;
}
@@ -2559,15 +2677,14 @@ static void handleObjCMethodFamilyAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
const auto *M = cast<ObjCMethodDecl>(D);
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ << AL << 1 << AANT_ArgumentIdentifier;
return;
}
IdentifierLoc *IL = AL.getArgAsIdent(0);
ObjCMethodFamilyAttr::FamilyKind F;
if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) {
- S.Diag(IL->Loc, diag::warn_attribute_type_not_supported)
- << AL.getName() << IL->Ident;
+ S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << AL << IL->Ident;
return;
}
@@ -2631,15 +2748,14 @@ static void handleObjCIndependentClass(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleBlocksAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ << AL << 1 << AANT_ArgumentIdentifier;
return;
}
IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
BlocksAttr::BlockType type;
if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << II;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
return;
}
@@ -2656,8 +2772,7 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIntegerConstant
- << E->getSourceRange();
+ << AL << 1 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
}
@@ -2677,8 +2792,7 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (E->isTypeDependent() || E->isValueDependent() ||
!E->isIntegerConstantExpr(Idx, S.Context)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 2 << AANT_ArgumentIntegerConstant
- << E->getSourceRange();
+ << AL << 2 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
}
nullPos = Idx.getZExtValue();
@@ -2726,12 +2840,12 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionMethodOrBlock;
+ << AL << ExpectedFunctionMethodOrBlock;
return;
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionMethodOrBlock;
+ << AL << ExpectedFunctionMethodOrBlock;
return;
}
D->addAttr(::new (S.Context)
@@ -2742,14 +2856,12 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->getFunctionType() &&
D->getFunctionType()->getReturnType()->isVoidType()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method)
- << AL.getName() << 0;
+ S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method) << AL << 0;
return;
}
if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
if (MD->getReturnType()->isVoidType()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::warn_attribute_void_function_method) << AL << 1;
return;
}
@@ -2757,7 +2869,7 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
// about using it as an extension.
if (!S.getLangOpts().CPlusPlus17 && AL.isCXX11Attribute() &&
!AL.getScopeName())
- S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL.getName();
+ S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL;
D->addAttr(::new (S.Context)
WarnUnusedResultAttr(AL.getRange(), S.Context,
@@ -2777,7 +2889,7 @@ static void handleWeakImportAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Nothing to warn about here.
} else
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedVariableOrFunction;
+ << AL << ExpectedVariableOrFunction;
return;
}
@@ -2793,11 +2905,12 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
const Expr *E = AL.getArgAsExpr(i);
- if (!checkUInt32Argument(S, AL, E, WGSize[i], i))
+ if (!checkUInt32Argument(S, AL, E, WGSize[i], i,
+ /*StrictlyUnsigned=*/true))
return;
if (WGSize[i] == 0) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
- << AL.getName() << E->getSourceRange();
+ << AL << E->getSourceRange();
return;
}
}
@@ -2806,7 +2919,7 @@ static void handleWorkGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
if (Existing && !(Existing->getXDim() == WGSize[0] &&
Existing->getYDim() == WGSize[1] &&
Existing->getZDim() == WGSize[2]))
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
D->addAttr(::new (S.Context) WorkGroupAttr(AL.getRange(), S.Context,
WGSize[0], WGSize[1], WGSize[2],
@@ -2821,14 +2934,14 @@ static void handleSubGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
if (SGSize == 0) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_is_zero)
- << AL.getName() << E->getSourceRange();
+ << AL << E->getSourceRange();
return;
}
OpenCLIntelReqdSubGroupSizeAttr *Existing =
D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
if (Existing && Existing->getSubGroupSize() != SGSize)
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
D->addAttr(::new (S.Context) OpenCLIntelReqdSubGroupSizeAttr(
AL.getRange(), S.Context, SGSize,
@@ -2837,8 +2950,7 @@ static void handleSubGroupSize(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleVecTypeHint(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.hasParsedType()) {
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
return;
}
@@ -2856,7 +2968,7 @@ static void handleVecTypeHint(Sema &S, Decl *D, const ParsedAttr &AL) {
if (VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>()) {
if (!S.Context.hasSameType(A->getTypeHint(), ParmType)) {
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
return;
}
}
@@ -3030,7 +3142,7 @@ static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
MinVectorWidthAttr *Existing = D->getAttr<MinVectorWidthAttr>();
if (Existing && Existing->getVectorWidth() != VecWidth) {
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
return;
}
@@ -3100,7 +3212,7 @@ static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 0 << AANT_ArgumentIdentifier;
+ << AL << 0 << AANT_ArgumentIdentifier;
return;
}
@@ -3108,8 +3220,7 @@ static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
IdentifierInfo *II = AL.getArgAsIdent(0)->Ident;
if (!EnumExtensibilityAttr::ConvertStrToKind(II->getName(),
ExtensibilityKind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << II;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << II;
return;
}
@@ -3188,7 +3299,7 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) {
/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.getLangOpts().CPlusPlus) {
- S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL.getName();
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored) << AL;
return;
}
@@ -3215,7 +3326,7 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (prioritynum < 101 || prioritynum > 65535) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_outof_range)
- << E->getSourceRange() << AL.getName() << 101 << 65535;
+ << E->getSourceRange() << AL << 101 << 65535;
AL.setInvalid();
return;
}
@@ -3250,7 +3361,7 @@ FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range,
static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ << AL << 1 << AANT_ArgumentIdentifier;
return;
}
@@ -3275,7 +3386,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (Kind == InvalidFormat) {
S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << II->getName();
+ << AL << II->getName();
return;
}
@@ -3287,7 +3398,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (Idx < 1 || Idx > NumArgs) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL.getName() << 2 << IdxExpr->getSourceRange();
+ << AL << 2 << IdxExpr->getSourceRange();
return;
}
@@ -3358,7 +3469,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// if 0 it disables parameter checking (to use with e.g. va_list)
} else if (FirstArg != 0 && FirstArg != NumArgs) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL.getName() << 3 << FirstArgExpr->getSourceRange();
+ << AL << 3 << FirstArgExpr->getSourceRange();
return;
}
@@ -3379,8 +3490,8 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RD = dyn_cast<RecordDecl>(D);
if (!RD || !RD->isUnion()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedUnion;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type) << AL
+ << ExpectedUnion;
return;
}
@@ -3513,8 +3624,7 @@ void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// check the attribute arguments.
if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
return;
}
@@ -3794,8 +3904,8 @@ static void handleModeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// This attribute isn't documented, but glibc uses it. It changes
// the width of an int or unsigned int to the specified size.
if (!AL.isArgIdent(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) << AL.getName()
- << AANT_ArgumentIdentifier;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
return;
}
@@ -3968,26 +4078,55 @@ AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
AttrSpellingListIndex);
}
-CommonAttr *Sema::mergeCommonAttr(Decl *D, SourceRange Range,
- IdentifierInfo *Ident,
- unsigned AttrSpellingListIndex) {
- if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, Range, Ident))
+CommonAttr *Sema::mergeCommonAttr(Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, AL))
+ return nullptr;
+
+ return ::new (Context)
+ CommonAttr(AL.getRange(), Context, AL.getAttributeSpellingListIndex());
+}
+
+CommonAttr *Sema::mergeCommonAttr(Decl *D, const CommonAttr &AL) {
+ if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, AL))
return nullptr;
- return ::new (Context) CommonAttr(Range, Context, AttrSpellingListIndex);
+ return ::new (Context)
+ CommonAttr(AL.getRange(), Context, AL.getSpellingListIndex());
}
+InternalLinkageAttr *Sema::mergeInternalLinkageAttr(Decl *D,
+ const ParsedAttr &AL) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ // Attribute applies to Var but not any subclass of it (like ParmVar,
+ // ImplicitParm or VarTemplateSpecialization).
+ if (VD->getKind() != Decl::Var) {
+ Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
+ return nullptr;
+ }
+ // Attribute does not apply to non-static local variables.
+ if (VD->hasLocalStorage()) {
+ Diag(VD->getLocation(), diag::warn_internal_linkage_local_storage);
+ return nullptr;
+ }
+ }
+
+ if (checkAttrMutualExclusion<CommonAttr>(*this, D, AL))
+ return nullptr;
+
+ return ::new (Context) InternalLinkageAttr(
+ AL.getRange(), Context, AL.getAttributeSpellingListIndex());
+}
InternalLinkageAttr *
-Sema::mergeInternalLinkageAttr(Decl *D, SourceRange Range,
- IdentifierInfo *Ident,
- unsigned AttrSpellingListIndex) {
+Sema::mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
// Attribute applies to Var but not any subclass of it (like ParmVar,
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
- Diag(Range.getBegin(), diag::warn_attribute_wrong_decl_type)
- << Ident << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
- : ExpectedVariableOrFunction);
+ Diag(AL.getLocation(), diag::warn_attribute_wrong_decl_type)
+ << &AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
return nullptr;
}
// Attribute does not apply to non-static local variables.
@@ -3997,11 +4136,11 @@ Sema::mergeInternalLinkageAttr(Decl *D, SourceRange Range,
}
}
- if (checkAttrMutualExclusion<CommonAttr>(*this, D, Range, Ident))
+ if (checkAttrMutualExclusion<CommonAttr>(*this, D, AL))
return nullptr;
return ::new (Context)
- InternalLinkageAttr(Range, Context, AttrSpellingListIndex);
+ InternalLinkageAttr(AL.getRange(), Context, AL.getSpellingListIndex());
}
MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range,
@@ -4039,8 +4178,7 @@ OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
}
static void handleAlwaysInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, AL))
return;
if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr(
@@ -4062,8 +4200,7 @@ static void handleOptimizeNoneAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleConstantAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL))
return;
const auto *VD = cast<VarDecl>(D);
if (!VD->hasGlobalStorage()) {
@@ -4075,13 +4212,12 @@ static void handleConstantAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL))
return;
const auto *VD = cast<VarDecl>(D);
// extern __shared__ is only allowed on arrays with no length (e.g.
// "int x[]").
- if (!S.getLangOpts().CUDARelocatableDeviceCode && VD->hasExternalStorage() &&
+ if (!S.getLangOpts().GPURelocatableDeviceCode && VD->hasExternalStorage() &&
!isa<IncompleteArrayType>(VD->getType())) {
S.Diag(AL.getLoc(), diag::err_cuda_extern_shared) << VD;
return;
@@ -4095,10 +4231,8 @@ static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, AL.getRange(),
- AL.getName()) ||
- checkAttrMutualExclusion<CUDAHostAttr>(S, D, AL.getRange(),
- AL.getName())) {
+ if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, AL) ||
+ checkAttrMutualExclusion<CUDAHostAttr>(S, D, AL)) {
return;
}
const auto *FD = cast<FunctionDecl>(D);
@@ -4112,15 +4246,15 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (const auto *Method = dyn_cast<CXXMethodDecl>(FD)) {
if (Method->isInstance()) {
- S.Diag(Method->getLocStart(), diag::err_kern_is_nonstatic_method)
+ S.Diag(Method->getBeginLoc(), diag::err_kern_is_nonstatic_method)
<< Method;
return;
}
- S.Diag(Method->getLocStart(), diag::warn_kern_is_method) << Method;
+ S.Diag(Method->getBeginLoc(), diag::warn_kern_is_method) << Method;
}
// Only warn for "inline" when compiling for host, to cut down on noise.
if (FD->isInlineSpecified() && !S.getLangOpts().CUDAIsDevice)
- S.Diag(FD->getLocStart(), diag::warn_kern_is_inline) << FD;
+ S.Diag(FD->getBeginLoc(), diag::warn_kern_is_inline) << FD;
D->addAttr(::new (S.Context)
CUDAGlobalAttr(AL.getRange(), S.Context,
@@ -4150,7 +4284,7 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionOrMethod;
+ << AL << ExpectedFunctionOrMethod;
return;
}
@@ -4222,6 +4356,11 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AL.getAttributeSpellingListIndex()));
return;
}
+ case ParsedAttr::AT_AArch64VectorPcs:
+ D->addAttr(::new(S.Context)
+ AArch64VectorPcsAttr(AL.getRange(), S.Context,
+ AL.getAttributeSpellingListIndex()));
+ return;
case ParsedAttr::AT_IntelOclBicc:
D->addAttr(::new (S.Context)
IntelOclBiccAttr(AL.getRange(), S.Context,
@@ -4299,6 +4438,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_VectorCall:
CC = CC_X86VectorCall;
break;
+ case ParsedAttr::AT_AArch64VectorPcs:
+ CC = CC_AArch64VectorCall;
+ break;
case ParsedAttr::AT_RegCall:
CC = CC_X86RegCall;
break;
@@ -4344,7 +4486,7 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC);
if (A != TargetInfo::CCCR_OK) {
if (A == TargetInfo::CCCR_Warning)
- Diag(Attrs.getLoc(), diag::warn_cconv_ignored) << Attrs.getName();
+ Diag(Attrs.getLoc(), diag::warn_cconv_ignored) << Attrs;
// This convention is not valid for the target. Use the default function or
// method calling convention.
@@ -4559,7 +4701,7 @@ static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << /* arg num = */ 1 << AANT_ArgumentIdentifier;
+ << AL << /* arg num = */ 1 << AANT_ArgumentIdentifier;
return;
}
@@ -4579,8 +4721,7 @@ static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
unsigned ArgumentIdxAST = ArgumentIdx.getASTIndex();
if (ArgumentIdxAST >= getFunctionOrMethodNumParams(D) ||
!getFunctionOrMethodParamType(D, ArgumentIdxAST)->isPointerType())
- S.Diag(AL.getLoc(), diag::err_attribute_pointers_only)
- << AL.getName() << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_pointers_only) << AL << 0;
}
D->addAttr(::new (S.Context) ArgumentWithTypeTagAttr(
@@ -4592,7 +4733,7 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
- << AL.getName() << 1 << AANT_ArgumentIdentifier;
+ << AL << 1 << AANT_ArgumentIdentifier;
return;
}
@@ -4601,7 +4742,7 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
if (!isa<VarDecl>(D)) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type)
- << AL.getName() << ExpectedVariable;
+ << AL << ExpectedVariable;
return;
}
@@ -4635,58 +4776,84 @@ static void handleXRayLogArgsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
-
static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType QT) {
return QT->isDependentType() || QT->isObjCRetainableType();
}
-static bool isValidSubjectOfNSAttribute(Sema &S, QualType QT) {
+static bool isValidSubjectOfNSAttribute(QualType QT) {
return QT->isDependentType() || QT->isObjCObjectPointerType() ||
- S.Context.isObjCNSObjectType(QT);
+ QT->isObjCNSObjectType();
}
-static bool isValidSubjectOfCFAttribute(Sema &S, QualType QT) {
+static bool isValidSubjectOfCFAttribute(QualType QT) {
return QT->isDependentType() || QT->isPointerType() ||
- isValidSubjectOfNSAttribute(S, QT);
+ isValidSubjectOfNSAttribute(QT);
}
-static void handleNSConsumedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- S.AddNSConsumedAttr(AL.getRange(), D, AL.getAttributeSpellingListIndex(),
- AL.getKind() == ParsedAttr::AT_NSConsumed,
- /*template instantiation*/ false);
+static bool isValidSubjectOfOSAttribute(QualType QT) {
+ if (QT->isDependentType())
+ return true;
+ QualType PT = QT->getPointeeType();
+ return !PT.isNull() && PT->getAsCXXRecordDecl() != nullptr;
}
-void Sema::AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
- unsigned SpellingIndex, bool IsNSConsumed,
- bool IsTemplateInstantiation) {
- const auto *Param = cast<ParmVarDecl>(D);
- bool TypeOK;
-
- if (IsNSConsumed)
- TypeOK = isValidSubjectOfNSAttribute(*this, Param->getType());
- else
- TypeOK = isValidSubjectOfCFAttribute(*this, Param->getType());
+void Sema::AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
+ RetainOwnershipKind K,
+ bool IsTemplateInstantiation) {
+ ValueDecl *VD = cast<ValueDecl>(D);
+ switch (K) {
+ case RetainOwnershipKind::OS:
+ handleSimpleAttributeOrDiagnose<OSConsumedAttr>(
+ *this, VD, SR, SpellingIndex, isValidSubjectOfOSAttribute(VD->getType()),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/SR, "os_consumed", /*pointers*/ 1);
+ return;
+ case RetainOwnershipKind::NS:
+ handleSimpleAttributeOrDiagnose<NSConsumedAttr>(
+ *this, VD, SR, SpellingIndex, isValidSubjectOfNSAttribute(VD->getType()),
- if (!TypeOK) {
- // These attributes are normally just advisory, but in ARC, ns_consumed
- // is significant. Allow non-dependent code to contain inappropriate
- // attributes even in ARC, but require template instantiations to be
- // set up correctly.
- Diag(D->getLocStart(), (IsTemplateInstantiation && IsNSConsumed &&
- getLangOpts().ObjCAutoRefCount
- ? diag::err_ns_attribute_wrong_parameter_type
- : diag::warn_ns_attribute_wrong_parameter_type))
- << AttrRange << (IsNSConsumed ? "ns_consumed" : "cf_consumed")
- << (IsNSConsumed ? /*objc pointers*/ 0 : /*cf pointers*/ 1);
+ // These attributes are normally just advisory, but in ARC, ns_consumed
+ // is significant. Allow non-dependent code to contain inappropriate
+ // attributes even in ARC, but require template instantiations to be
+ // set up correctly.
+ ((IsTemplateInstantiation && getLangOpts().ObjCAutoRefCount)
+ ? diag::err_ns_attribute_wrong_parameter_type
+ : diag::warn_ns_attribute_wrong_parameter_type),
+ /*ExtraArgs=*/SR, "ns_consumed", /*objc pointers*/ 0);
+ return;
+ case RetainOwnershipKind::CF:
+ handleSimpleAttributeOrDiagnose<CFConsumedAttr>(
+ *this, VD, SR, SpellingIndex,
+ isValidSubjectOfCFAttribute(VD->getType()),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*ExtraArgs=*/SR, "cf_consumed", /*pointers*/1);
return;
}
+}
- if (IsNSConsumed)
- D->addAttr(::new (Context)
- NSConsumedAttr(AttrRange, Context, SpellingIndex));
- else
- D->addAttr(::new (Context)
- CFConsumedAttr(AttrRange, Context, SpellingIndex));
+static Sema::RetainOwnershipKind
+parsedAttrToRetainOwnershipKind(const ParsedAttr &AL) {
+ switch (AL.getKind()) {
+ case ParsedAttr::AT_CFConsumed:
+ case ParsedAttr::AT_CFReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
+ return Sema::RetainOwnershipKind::CF;
+ case ParsedAttr::AT_OSConsumesThis:
+ case ParsedAttr::AT_OSConsumed:
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ case ParsedAttr::AT_OSReturnsRetainedOnZero:
+ case ParsedAttr::AT_OSReturnsRetainedOnNonZero:
+ return Sema::RetainOwnershipKind::OS;
+ case ParsedAttr::AT_NSConsumesSelf:
+ case ParsedAttr::AT_NSConsumed:
+ case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_NSReturnsNotRetained:
+ case ParsedAttr::AT_NSReturnsAutoreleased:
+ return Sema::RetainOwnershipKind::NS;
+ default:
+ llvm_unreachable("Wrong argument supplied");
+ }
}
bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
@@ -4698,25 +4865,40 @@ bool Sema::checkNSReturnsRetainedReturnType(SourceLocation Loc, QualType QT) {
return true;
}
-static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
+/// \return whether the parameter is a pointer to OSObject pointer.
+static bool isValidOSObjectOutParameter(const Decl *D) {
+ const auto *PVD = dyn_cast<ParmVarDecl>(D);
+ if (!PVD)
+ return false;
+ QualType QT = PVD->getType();
+ QualType PT = QT->getPointeeType();
+ return !PT.isNull() && isValidSubjectOfOSAttribute(PT);
+}
+
+static void handleXReturnsXRetainedAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
QualType ReturnType;
+ Sema::RetainOwnershipKind K = parsedAttrToRetainOwnershipKind(AL);
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
ReturnType = MD->getReturnType();
- else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
- (AL.getKind() == ParsedAttr::AT_NSReturnsRetained))
+ } else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
+ (AL.getKind() == ParsedAttr::AT_NSReturnsRetained)) {
return; // ignore: was handled as a type attribute
- else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D))
+ } else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(D)) {
ReturnType = PD->getType();
- else if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
ReturnType = FD->getReturnType();
- else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ } else if (const auto *Param = dyn_cast<ParmVarDecl>(D)) {
+ // Attributes on parameters are used for out-parameters,
+ // passed as pointers-to-pointers.
+ unsigned DiagID = K == Sema::RetainOwnershipKind::CF
+ ? /*pointer-to-CF-pointer*/2
+ : /*pointer-to-OSObject-pointer*/3;
ReturnType = Param->getType()->getPointeeType();
if (ReturnType.isNull()) {
- S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
- << AL.getName() << /*pointer-to-CF*/2
- << AL.getRange();
+ S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
+ << AL << DiagID << AL.getRange();
return;
}
} else if (AL.isUsedAsTypeAttr()) {
@@ -4731,18 +4913,21 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
ExpectedDeclKind = ExpectedFunctionOrMethod;
break;
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
case ParsedAttr::AT_CFReturnsRetained:
case ParsedAttr::AT_CFReturnsNotRetained:
ExpectedDeclKind = ExpectedFunctionMethodOrParameter;
break;
}
- S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << AL.getRange() << AL.getName() << ExpectedDeclKind;
+ S.Diag(D->getBeginLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getRange() << AL << ExpectedDeclKind;
return;
}
bool TypeOK;
bool Cf;
+ unsigned ParmDiagID = 2; // Pointer-to-CF-pointer
switch (AL.getKind()) {
default: llvm_unreachable("invalid ownership attribute");
case ParsedAttr::AT_NSReturnsRetained:
@@ -4752,14 +4937,21 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
case ParsedAttr::AT_NSReturnsAutoreleased:
case ParsedAttr::AT_NSReturnsNotRetained:
- TypeOK = isValidSubjectOfNSAttribute(S, ReturnType);
+ TypeOK = isValidSubjectOfNSAttribute(ReturnType);
Cf = false;
break;
case ParsedAttr::AT_CFReturnsRetained:
case ParsedAttr::AT_CFReturnsNotRetained:
- TypeOK = isValidSubjectOfCFAttribute(S, ReturnType);
+ TypeOK = isValidSubjectOfCFAttribute(ReturnType);
+ Cf = true;
+ break;
+
+ case ParsedAttr::AT_OSReturnsRetained:
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ TypeOK = isValidSubjectOfOSAttribute(ReturnType);
Cf = true;
+ ParmDiagID = 3; // Pointer-to-OSObject-pointer
break;
}
@@ -4768,9 +4960,8 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
return;
if (isa<ParmVarDecl>(D)) {
- S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
- << AL.getName() << /*pointer-to-CF*/2
- << AL.getRange();
+ S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_parameter_type)
+ << AL << ParmDiagID << AL.getRange();
} else {
// Needs to be kept in sync with warn_ns_attribute_wrong_return_type.
enum : unsigned {
@@ -4782,9 +4973,8 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
SubjectKind = Method;
else if (isa<ObjCPropertyDecl>(D))
SubjectKind = Property;
- S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
- << AL.getName() << SubjectKind << Cf
- << AL.getRange();
+ S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << AL << SubjectKind << Cf << AL.getRange();
}
return;
}
@@ -4793,24 +4983,25 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
default:
llvm_unreachable("invalid ownership attribute");
case ParsedAttr::AT_NSReturnsAutoreleased:
- D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsAutoreleasedAttr>(S, D, AL);
return;
case ParsedAttr::AT_CFReturnsNotRetained:
- D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<CFReturnsNotRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_NSReturnsNotRetained:
- D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsNotRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_CFReturnsRetained:
- D->addAttr(::new (S.Context) CFReturnsRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<CFReturnsRetainedAttr>(S, D, AL);
return;
case ParsedAttr::AT_NSReturnsRetained:
- D->addAttr(::new (S.Context) NSReturnsRetainedAttr(
- AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
+ handleSimpleAttribute<NSReturnsRetainedAttr>(S, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsRetained:
+ handleSimpleAttribute<OSReturnsRetainedAttr>(S, D, AL);
+ return;
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ handleSimpleAttribute<OSReturnsNotRetainedAttr>(S, D, AL);
return;
};
}
@@ -4829,11 +5020,10 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
if (!resultType->isReferenceType() &&
(!resultType->isPointerType() || resultType->isObjCRetainableType())) {
- S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
- << SourceRange(loc)
- << Attrs.getName()
- << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
- << /*non-retainable pointer*/ 2;
+ S.Diag(D->getBeginLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << SourceRange(loc) << Attrs
+ << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty)
+ << /*non-retainable pointer*/ 2;
// Drop the attribute.
return;
@@ -4849,14 +5039,14 @@ static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
const DeclContext *DC = Method->getDeclContext();
if (const auto *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
- S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
- << Attrs.getName() << 0;
+ S.Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol) << Attrs
+ << 0;
S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
return;
}
if (Method->getMethodFamily() == OMF_dealloc) {
- S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol)
- << Attrs.getName() << 1;
+ S.Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol) << Attrs
+ << 1;
return;
}
@@ -4868,15 +5058,14 @@ static void handleObjCBridgeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
if (!Parm) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
+ S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
return;
}
// Typedefs only allow objc_bridge(id) and have some additional checking.
if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
if (!Parm->Ident->isStr("id")) {
- S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_id)
- << AL.getName();
+ S.Diag(AL.getLoc(), diag::err_objc_attr_typedef_not_id) << AL;
return;
}
@@ -4898,7 +5087,7 @@ static void handleObjCBridgeMutableAttr(Sema &S, Decl *D,
IdentifierLoc *Parm = AL.isArgIdent(0) ? AL.getArgAsIdent(0) : nullptr;
if (!Parm) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
+ S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
return;
}
@@ -4912,7 +5101,7 @@ static void handleObjCBridgeRelatedAttr(Sema &S, Decl *D,
IdentifierInfo *RelatedClass =
AL.isArgIdent(0) ? AL.getArgAsIdent(0)->Ident : nullptr;
if (!RelatedClass) {
- S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << AL.getName() << 0;
+ S.Diag(D->getBeginLoc(), diag::err_objc_attr_not_id) << AL << 0;
return;
}
IdentifierInfo *ClassMethod =
@@ -4982,8 +5171,8 @@ static void handleObjCBoxable(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleObjCOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (hasDeclarator(D)) return;
- S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
- << AL.getRange() << AL.getName() << ExpectedVariable;
+ S.Diag(D->getBeginLoc(), diag::err_attribute_wrong_decl_type)
+ << AL.getRange() << AL << ExpectedVariable;
}
static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
@@ -5047,7 +5236,7 @@ UuidAttr *Sema::mergeUuidAttr(Decl *D, SourceRange Range,
static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
- << AL.getName() << AttributeLangSupport::C;
+ << AL << AttributeLangSupport::C;
return;
}
@@ -5097,7 +5286,7 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
- << AL.getName() << AttributeLangSupport::C;
+ << AL << AttributeLangSupport::C;
return;
}
MSInheritanceAttr *IA = S.mergeMSInheritanceAttr(
@@ -5152,7 +5341,7 @@ static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
// Store tags sorted and without duplicates.
- llvm::sort(Tags.begin(), Tags.end());
+ llvm::sort(Tags);
Tags.erase(std::unique(Tags.begin(), Tags.end()), Tags.end());
D->addAttr(::new (S.Context)
@@ -5163,8 +5352,7 @@ static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check the attribute arguments.
if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
return;
}
@@ -5178,8 +5366,8 @@ static void handleARMInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ARMInterruptAttr::InterruptType Kind;
if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << Str << ArgLoc;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << Str
+ << ArgLoc;
return;
}
@@ -5193,8 +5381,8 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
if (!AL.isArgExpr(0)) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_type) << AL.getName()
- << AANT_ArgumentIntegerConstant;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIntegerConstant;
return;
}
@@ -5204,16 +5392,16 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
llvm::APSInt NumParams(32);
if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
- << AL.getName() << AANT_ArgumentIntegerConstant
- << NumParamsExpr->getSourceRange();
+ << AL << AANT_ArgumentIntegerConstant
+ << NumParamsExpr->getSourceRange();
return;
}
unsigned Num = NumParams.getLimitedValue(255);
if ((Num & 1) || Num > 30) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL.getName() << (int)NumParams.getSExtValue()
- << NumParamsExpr->getSourceRange();
+ << AL << (int)NumParams.getSExtValue()
+ << NumParamsExpr->getSourceRange();
return;
}
@@ -5226,8 +5414,7 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Only one optional argument permitted.
if (AL.getNumArgs() > 1) {
- S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
return;
}
@@ -5266,14 +5453,13 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (checkAttrMutualExclusion<Mips16Attr>(S, D, AL.getRange(),
- AL.getName()))
+ if (checkAttrMutualExclusion<Mips16Attr>(S, D, AL))
return;
MipsInterruptAttr::InterruptType Kind;
if (!MipsInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << "'" + std::string(Str) + "'";
+ << AL << "'" + std::string(Str) + "'";
return;
}
@@ -5292,7 +5478,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
CXXMethodDecl::isStaticOverloadedOperator(
cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionWithProtoType;
+ << AL << ExpectedFunctionWithProtoType;
return;
}
// Interrupt handler must have void return type.
@@ -5308,7 +5494,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Interrupt handler must have 1 or 2 parameters.
unsigned NumParams = getFunctionOrMethodNumParams(D);
if (NumParams < 1 || NumParams > 2) {
- S.Diag(D->getLocStart(), diag::err_anyx86_interrupt_attribute)
+ S.Diag(D->getBeginLoc(), diag::err_anyx86_interrupt_attribute)
<< (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86
? 0
: 1)
@@ -5421,8 +5607,8 @@ static void handleRISCVInterruptAttr(Sema &S, Decl *D,
RISCVInterruptAttr::InterruptType Kind;
if (!RISCVInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
- S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
- << AL.getName() << Str << ArgLoc;
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported) << AL << Str
+ << ArgLoc;
return;
}
@@ -5470,13 +5656,11 @@ static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
return;
if (Min == 0 && Max != 0) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
- << AL.getName() << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 0;
return;
}
if (Min > Max) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 1;
return;
}
@@ -5499,13 +5683,11 @@ static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (Min == 0 && Max != 0) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
- << AL.getName() << 0;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 0;
return;
}
if (Max != 0 && Min > Max) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid)
- << AL.getName() << 1;
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_invalid) << AL << 1;
return;
}
@@ -5552,7 +5734,7 @@ static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
// Attribute can only be applied to function types.
if (!isa<FunctionDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunction;
+ << AL << ExpectedFunction;
return;
}
@@ -5568,12 +5750,17 @@ static void handleLayoutVersion(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
// TODO: Investigate what happens with the next major version of MSVC.
- if (Version != LangOptions::MSVC2015) {
+ if (Version != LangOptions::MSVC2015 / 100) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL.getName() << Version << VersionExpr->getSourceRange();
+ << AL << Version << VersionExpr->getSourceRange();
return;
}
+ // The attribute expects a "major" version number like 19, but new versions of
+ // MSVC have moved to updating the "minor", or less significant numbers, so we
+ // have to multiply by 100 now.
+ Version *= 100;
+
D->addAttr(::new (S.Context)
LayoutVersionAttr(AL.getRange(), S.Context, Version,
AL.getAttributeSpellingListIndex()));
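
Illustrative sketch (not part of the patch) of the arithmetic this hunk introduces, assuming the __declspec(layout_version(N)) spelling behind LayoutVersionAttr: the user writes the two-digit "major" version, and the stored value is scaled up to match LangOptions::MSVC2015 (1900).

    // 19 is the only value the check above currently accepts (MSVC2015 / 100).
    struct __declspec(layout_version(19)) S {
      // Laid out with the MSVC 19xx (VS 2015+) record-layout rules.
    };
    // Stored attribute value: 19 * 100 == 1900 == LangOptions::MSVC2015.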
@@ -5608,8 +5795,7 @@ DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range,
static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
if (isa<ClassTemplatePartialSpecializationDecl>(D) &&
S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored)
- << A.getName();
+ S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored) << A;
return;
}
@@ -5618,7 +5804,7 @@ static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
!S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// MinGW doesn't allow dllimport on inline functions.
S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline)
- << A.getName();
+ << A;
return;
}
}
@@ -5626,7 +5812,7 @@ static void handleDLLAttr(Sema &S, Decl *D, const ParsedAttr &A) {
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() &&
MD->getParent()->isLambda()) {
- S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda) << A.getName();
+ S.Diag(A.getRange().getBegin(), diag::err_attribute_dll_lambda) << A;
return;
}
}
@@ -5788,10 +5974,8 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
!S.checkStringLiteralArgumentAttr(AL, 1, Replacement))
return;
- if (!S.getLangOpts().CPlusPlus14)
- if (AL.isCXX11Attribute() &&
- !(AL.hasScope() && AL.getScopeName()->isStr("gnu")))
- S.Diag(AL.getLoc(), diag::ext_cxx14_attr) << AL.getName();
+ if (!S.getLangOpts().CPlusPlus14 && AL.isCXX11Attribute() && !AL.isGNUScope())
+ S.Diag(AL.getLoc(), diag::ext_cxx14_attr) << AL;
D->addAttr(::new (S.Context)
DeprecatedAttr(AL.getRange(), S.Context, Str, Replacement,
@@ -5821,7 +6005,7 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
else if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunctionOrMethod;
+ << AL << ExpectedFunctionOrMethod;
Sanitizers.push_back(SanitizerName);
}
@@ -5841,26 +6025,24 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
.Case("no_sanitize_memory", "memory");
if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << AL.getName() << ExpectedFunction;
+ << AL << ExpectedFunction;
D->addAttr(::new (S.Context)
NoSanitizeAttr(AL.getRange(), S.Context, &SanitizerName, 1,
AL.getAttributeSpellingListIndex()));
}
static void handleInternalLinkageAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (InternalLinkageAttr *Internal =
- S.mergeInternalLinkageAttr(D, AL.getRange(), AL.getName(),
- AL.getAttributeSpellingListIndex()))
+ if (InternalLinkageAttr *Internal = S.mergeInternalLinkageAttr(D, AL))
D->addAttr(Internal);
}
static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (S.LangOpts.OpenCLVersion != 200)
S.Diag(AL.getLoc(), diag::err_attribute_requires_opencl_version)
- << AL.getName() << "2.0" << 0;
+ << AL << "2.0" << 0;
else
- S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
- << AL.getName() << "2.0";
+ S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored) << AL
+ << "2.0";
}
/// Handles semantic checking for features that are common to all attributes,
@@ -5912,10 +6094,16 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Check if there is only one access qualifier.
if (D->hasAttr<OpenCLAccessAttr>()) {
- S.Diag(AL.getLoc(), diag::err_opencl_multiple_access_qualifiers)
- << D->getSourceRange();
- D->setInvalidDecl(true);
- return;
+ if (D->getAttr<OpenCLAccessAttr>()->getSemanticSpelling() ==
+ AL.getSemanticSpelling()) {
+ S.Diag(AL.getLoc(), diag::warn_duplicate_declspec)
+ << AL.getName()->getName() << AL.getRange();
+ } else {
+ S.Diag(AL.getLoc(), diag::err_opencl_multiple_access_qualifiers)
+ << D->getSourceRange();
+ D->setInvalidDecl(true);
+ return;
+ }
}
// OpenCL v2.0 s6.6 - read_write can be used for image types to specify that an
@@ -5928,7 +6116,7 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (AL.getName()->getName().find("read_write") != StringRef::npos) {
if (S.getLangOpts().OpenCLVersion < 200 || DeclTy->isPipeType()) {
S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
- << AL.getName() << PDecl->getType() << DeclTy->isImageType();
+ << AL << PDecl->getType() << DeclTy->isImageType();
D->setInvalidDecl(true);
return;
}
@@ -5939,6 +6127,100 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
AL.getRange(), S.Context, AL.getAttributeSpellingListIndex()));
}
+static void handleDestroyAttr(Sema &S, Decl *D, const ParsedAttr &A) {
+ if (!cast<VarDecl>(D)->hasGlobalStorage()) {
+ S.Diag(D->getLocation(), diag::err_destroy_attr_on_non_static_var)
+ << (A.getKind() == ParsedAttr::AT_AlwaysDestroy);
+ return;
+ }
+
+ if (A.getKind() == ParsedAttr::AT_AlwaysDestroy)
+ handleSimpleAttributeWithExclusions<AlwaysDestroyAttr, NoDestroyAttr>(S, D, A);
+ else
+ handleSimpleAttributeWithExclusions<NoDestroyAttr, AlwaysDestroyAttr>(S, D, A);
+}
+
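+// A minimal usage sketch (not part of the patch) of what handleDestroyAttr above
+// accepts and rejects, assuming the [[clang::always_destroy]] / [[clang::no_destroy]]
+// spellings that map to AT_AlwaysDestroy / AT_NoDestroy:
+//
+//   struct Logger { ~Logger(); };
+//
+//   [[clang::no_destroy]] Logger GlobalLog;       // OK: static storage, exit-time dtor skipped
+//   void f() {
+//     [[clang::always_destroy]] static Logger L;  // OK: static local, dtor always registered
+//     [[clang::no_destroy]] Logger Bad;           // err_destroy_attr_on_non_static_var
+//   }
+//
+// The two attributes are mutually exclusive, enforced by the
+// handleSimpleAttributeWithExclusions calls above.
+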
+static void handleUninitializedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ assert(cast<VarDecl>(D)->getStorageDuration() == SD_Automatic &&
+ "uninitialized is only valid on automatic duration variables");
+ unsigned Index = AL.getAttributeSpellingListIndex();
+ D->addAttr(::new (S.Context)
+ UninitializedAttr(AL.getLoc(), S.Context, Index));
+}
+
+static bool tryMakeVariablePseudoStrong(Sema &S, VarDecl *VD,
+ bool DiagnoseFailure) {
+ QualType Ty = VD->getType();
+ if (!Ty->isObjCRetainableType()) {
+ if (DiagnoseFailure) {
+ S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
+ << 0;
+ }
+ return false;
+ }
+
+ Qualifiers::ObjCLifetime LifetimeQual = Ty.getQualifiers().getObjCLifetime();
+
+ // Sema::inferObjCARCLifetime must run after processing decl attributes
+ // (because __block lowers to an attribute), so if the lifetime hasn't been
+ // explicitly specified, infer it locally now.
+ if (LifetimeQual == Qualifiers::OCL_None)
+ LifetimeQual = Ty->getObjCARCImplicitLifetime();
+
+ // The attribute only really makes sense for __strong variables; ignore any
+ // attempts to annotate a parameter with any other lifetime qualifier.
+ if (LifetimeQual != Qualifiers::OCL_Strong) {
+ if (DiagnoseFailure) {
+ S.Diag(VD->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
+ << 1;
+ }
+ return false;
+ }
+
+ // Tampering with the type of a VarDecl here is a bit of a hack, but we need
+ // to ensure that the variable is 'const' so that we can error on
+ // modification, which can otherwise over-release.
+ VD->setType(Ty.withConst());
+ VD->setARCPseudoStrong(true);
+ return true;
+}
+
+static void handleObjCExternallyRetainedAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ assert(!isa<ParmVarDecl>(VD) && "should be diagnosed automatically");
+ if (!VD->hasLocalStorage()) {
+ S.Diag(D->getBeginLoc(), diag::warn_ignored_objc_externally_retained)
+ << 0;
+ return;
+ }
+
+ if (!tryMakeVariablePseudoStrong(S, VD, /*DiagnoseFailure=*/true))
+ return;
+
+ handleSimpleAttribute<ObjCExternallyRetainedAttr>(S, D, AL);
+ return;
+ }
+
+ // If D is a function-like declaration (method, block, or function), then we
+ // make every parameter pseudo-strong.
+ for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E; ++I) {
+ auto *PVD = const_cast<ParmVarDecl *>(getFunctionOrMethodParam(D, I));
+ QualType Ty = PVD->getType();
+
+ // If a user wrote a parameter with __strong explicitly, then assume they
+ // want "real" strong semantics for that parameter. This works because if
+ // the parameter was written with __strong, then the strong qualifier will
+ // be non-local.
+ if (Ty.getLocalUnqualifiedType().getQualifiers().getObjCLifetime() ==
+ Qualifiers::OCL_Strong)
+ continue;
+
+ tryMakeVariablePseudoStrong(S, PVD, /*DiagnoseFailure=*/false);
+ }
+ handleSimpleAttribute<ObjCExternallyRetainedAttr>(S, D, AL);
+}
+
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
@@ -5962,10 +6244,11 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
// though they were unknown attributes.
if (AL.getKind() == ParsedAttr::UnknownAttribute ||
!AL.existsInTarget(S.Context.getTargetInfo())) {
- S.Diag(AL.getLoc(), AL.isDeclspecAttribute()
- ? diag::warn_unhandled_ms_attribute_ignored
- : diag::warn_unknown_attribute_ignored)
- << AL.getName();
+ S.Diag(AL.getLoc(),
+ AL.isDeclspecAttribute()
+ ? (unsigned)diag::warn_unhandled_ms_attribute_ignored
+ : (unsigned)diag::warn_unknown_attribute_ignored)
+ << AL;
return;
}
@@ -5980,7 +6263,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
break;
}
S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
- << AL.getName() << D->getLocation();
+ << AL << D->getLocation();
break;
case ParsedAttr::AT_Interrupt:
handleInterruptAttr(S, D, AL);
@@ -6259,17 +6542,37 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
break;
case ParsedAttr::AT_CFConsumed:
case ParsedAttr::AT_NSConsumed:
- handleNSConsumedAttr(S, D, AL);
+ case ParsedAttr::AT_OSConsumed:
+ S.AddXConsumedAttr(D, AL.getRange(), AL.getAttributeSpellingListIndex(),
+ parsedAttrToRetainOwnershipKind(AL),
+ /*IsTemplateInstantiation=*/false);
break;
case ParsedAttr::AT_NSConsumesSelf:
handleSimpleAttribute<NSConsumesSelfAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_OSConsumesThis:
+ handleSimpleAttribute<OSConsumesThisAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_OSReturnsRetainedOnZero:
+ handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnZeroAttr>(
+ S, D, AL, isValidOSObjectOutParameter(D),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*Extra Args=*/AL, /*pointer-to-OSObject-pointer*/ 3, AL.getRange());
+ break;
+ case ParsedAttr::AT_OSReturnsRetainedOnNonZero:
+ handleSimpleAttributeOrDiagnose<OSReturnsRetainedOnNonZeroAttr>(
+ S, D, AL, isValidOSObjectOutParameter(D),
+ diag::warn_ns_attribute_wrong_parameter_type,
+ /*Extra Args=*/AL, /*pointer-to-OSObject-pointer*/ 3, AL.getRange());
+ break;
case ParsedAttr::AT_NSReturnsAutoreleased:
case ParsedAttr::AT_NSReturnsNotRetained:
- case ParsedAttr::AT_CFReturnsNotRetained:
case ParsedAttr::AT_NSReturnsRetained:
+ case ParsedAttr::AT_CFReturnsNotRetained:
case ParsedAttr::AT_CFReturnsRetained:
- handleNSReturnsRetainedAttr(S, D, AL);
+ case ParsedAttr::AT_OSReturnsNotRetained:
+ case ParsedAttr::AT_OSReturnsRetained:
+ handleXReturnsXRetainedAttr(S, D, AL);
break;
case ParsedAttr::AT_WorkGroupSizeHint:
handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, AL);
@@ -6295,6 +6598,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SpeculativeLoadHardening:
+ handleSimpleAttribute<SpeculativeLoadHardeningAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
break;
@@ -6423,6 +6729,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_IntelOclBicc:
case ParsedAttr::AT_PreserveMost:
case ParsedAttr::AT_PreserveAll:
+ case ParsedAttr::AT_AArch64VectorPcs:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
@@ -6449,6 +6756,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ExcludeFromExplicitInstantiation:
+ handleSimpleAttribute<ExcludeFromExplicitInstantiationAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_LTOVisibilityPublic:
handleSimpleAttribute<LTOVisibilityPublicAttr>(S, D, AL);
break;
@@ -6604,6 +6914,24 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_XRayLogArgs:
handleXRayLogArgsAttr(S, D, AL);
break;
+
+ // Move semantics attribute.
+ case ParsedAttr::AT_Reinitializes:
+ handleSimpleAttribute<ReinitializesAttr>(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_AlwaysDestroy:
+ case ParsedAttr::AT_NoDestroy:
+ handleDestroyAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_Uninitialized:
+ handleUninitializedAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_ObjCExternallyRetained:
+ handleObjCExternallyRetainedAttr(S, D, AL);
+ break;
}
}
@@ -6708,10 +7036,10 @@ static void checkUnusedDeclAttributes(Sema &S, const ParsedAttributesView &A) {
if (AL.getKind() == ParsedAttr::UnknownAttribute) {
S.Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
- << AL.getName() << AL.getRange();
+ << AL << AL.getRange();
} else {
- S.Diag(AL.getLoc(), diag::warn_attribute_not_on_decl)
- << AL.getName() << AL.getRange();
+ S.Diag(AL.getLoc(), diag::warn_attribute_not_on_decl) << AL
+ << AL.getRange();
}
}
}
@@ -6942,8 +7270,12 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
/// \param D The declaration to check.
/// \param Message If non-null, this will be populated with the message from
/// the availability attribute that is selected.
+/// \param ClassReceiver If we're checking the method of a class message
+/// send, the class. Otherwise nullptr.
static std::pair<AvailabilityResult, const NamedDecl *>
-ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
+ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
+ std::string *Message,
+ ObjCInterfaceDecl *ClassReceiver) {
AvailabilityResult Result = D->getAvailability(Message);
// For typedefs, if the typedef declaration appears available look
@@ -6976,6 +7308,20 @@ ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
}
}
+ // For +new, infer availability from -init.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (S.NSAPIObj && ClassReceiver) {
+ ObjCMethodDecl *Init = ClassReceiver->lookupInstanceMethod(
+ S.NSAPIObj->getInitSelector());
+ if (Init && Result == AR_Available && MD->isClassMethod() &&
+ MD->getSelector() == S.NSAPIObj->getNewSelector() &&
+ MD->definedInNSObject(S.getASTContext())) {
+ Result = Init->getAvailability(Message);
+ D = Init;
+ }
+ }
+ }
+
return {Result, D};
}
@@ -6983,9 +7329,10 @@ ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
/// whether we should emit a diagnostic for \c K and \c DeclVersion in
/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
/// in a deprecated context, but not the other way around.
-static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
- VersionTuple DeclVersion,
- Decl *Ctx) {
+static bool
+ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
+ VersionTuple DeclVersion, Decl *Ctx,
+ const NamedDecl *OffendingDecl) {
assert(K != AR_Available && "Expected an unavailable declaration here!");
// Checks if we should emit the availability diagnostic in the context of C.
@@ -6994,9 +7341,22 @@ static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
if (AA->getIntroduced() >= DeclVersion)
return true;
- } else if (K == AR_Deprecated)
+ } else if (K == AR_Deprecated) {
if (C->isDeprecated())
return true;
+ } else if (K == AR_Unavailable) {
+ // It is perfectly fine to refer to an 'unavailable' Objective-C method
+ // when it's actually defined and is referenced from within the
+ // @implementation itself. In this context, we interpret unavailable as a
+ // form of access control.
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(OffendingDecl)) {
+ if (const auto *Impl = dyn_cast<ObjCImplDecl>(C)) {
+ if (MD->getClassInterface() == Impl->getClassInterface() &&
+ MD->isDefined())
+ return true;
+ }
+ }
+ }
if (C->isUnavailable())
return true;
@@ -7078,13 +7438,13 @@ struct AttributeInsertion {
StringRef Suffix;
static AttributeInsertion createInsertionAfter(const NamedDecl *D) {
- return {" ", D->getLocEnd(), ""};
+ return {" ", D->getEndLoc(), ""};
}
static AttributeInsertion createInsertionAfter(SourceLocation Loc) {
return {" ", Loc, ""};
}
static AttributeInsertion createInsertionBefore(const NamedDecl *D) {
- return {"", D->getLocStart(), "\n"};
+ return {"", D->getBeginLoc(), "\n"};
}
};
@@ -7185,7 +7545,8 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
DeclVersion = AA->getIntroduced();
- if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx))
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx,
+ OffendingDecl))
return;
SourceLocation Loc = Locs.front();
@@ -7223,14 +7584,16 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
unsigned Warning = UseNewWarning ? diag::warn_unguarded_availability_new
: diag::warn_unguarded_availability;
- S.Diag(Loc, Warning)
- << OffendingDecl
- << AvailabilityAttr::getPrettyPlatformName(
- S.getASTContext().getTargetInfo().getPlatformName())
- << Introduced.getAsString();
+ std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
+ S.getASTContext().getTargetInfo().getPlatformName());
- S.Diag(OffendingDecl->getLocation(), diag::note_availability_specified_here)
- << OffendingDecl << /* partial */ 3;
+ S.Diag(Loc, Warning) << OffendingDecl << PlatformName
+ << Introduced.getAsString();
+
+ S.Diag(OffendingDecl->getLocation(),
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << S.Context.getTargetInfo().getPlatformMinVersion().getAsString();
if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
if (const auto *TD = dyn_cast<TagDecl>(Enclosing))
@@ -7423,6 +7786,7 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
// for each of the different declarations.
const DelayedDiagnosticPool *pool = &poppedPool;
do {
+ bool AnyAccessFailures = false;
for (DelayedDiagnosticPool::pool_iterator
i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) {
// This const_cast is a bit lame. Really, Triggered should be mutable.
@@ -7439,7 +7803,14 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
break;
case DelayedDiagnostic::Access:
+ // Only produce one access control diagnostic for a structured binding
+ // declaration: we don't need to tell the user that all the fields are
+ // inaccessible one at a time.
+ if (AnyAccessFailures && isa<DecompositionDecl>(decl))
+ continue;
HandleDelayedAccessCheck(diag, decl);
+ if (diag.Triggered)
+ AnyAccessFailures = true;
break;
case DelayedDiagnostic::ForbiddenType:
@@ -7564,7 +7935,8 @@ class DiagnoseUnguardedAvailability
SmallVector<VersionTuple, 8> AvailabilityStack;
SmallVector<const Stmt *, 16> StmtStack;
- void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range);
+ void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range,
+ ObjCInterfaceDecl *ClassReceiver = nullptr);
public:
DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
@@ -7606,27 +7978,33 @@ public:
}
bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
- if (ObjCMethodDecl *D = Msg->getMethodDecl())
+ if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
+ ObjCInterfaceDecl *ID = nullptr;
+ QualType ReceiverTy = Msg->getClassReceiver();
+ if (!ReceiverTy.isNull() && ReceiverTy->getAsObjCInterfaceType())
+ ID = ReceiverTy->getAsObjCInterfaceType()->getInterface();
+
DiagnoseDeclAvailability(
- D, SourceRange(Msg->getSelectorStartLoc(), Msg->getLocEnd()));
+ D, SourceRange(Msg->getSelectorStartLoc(), Msg->getEndLoc()), ID);
+ }
return true;
}
bool VisitDeclRefExpr(DeclRefExpr *DRE) {
DiagnoseDeclAvailability(DRE->getDecl(),
- SourceRange(DRE->getLocStart(), DRE->getLocEnd()));
+ SourceRange(DRE->getBeginLoc(), DRE->getEndLoc()));
return true;
}
bool VisitMemberExpr(MemberExpr *ME) {
DiagnoseDeclAvailability(ME->getMemberDecl(),
- SourceRange(ME->getLocStart(), ME->getLocEnd()));
+ SourceRange(ME->getBeginLoc(), ME->getEndLoc()));
return true;
}
bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
- SemaRef.Diag(E->getLocStart(), diag::warn_at_available_unchecked_use)
- << (!SemaRef.getLangOpts().ObjC1);
+ SemaRef.Diag(E->getBeginLoc(), diag::warn_at_available_unchecked_use)
+ << (!SemaRef.getLangOpts().ObjC);
return true;
}
@@ -7634,11 +8012,11 @@ public:
};
void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
- NamedDecl *D, SourceRange Range) {
+ NamedDecl *D, SourceRange Range, ObjCInterfaceDecl *ReceiverClass) {
AvailabilityResult Result;
const NamedDecl *OffendingDecl;
std::tie(Result, OffendingDecl) =
- ShouldDiagnoseAvailabilityOfDecl(D, nullptr);
+ ShouldDiagnoseAvailabilityOfDecl(SemaRef, D, nullptr, ReceiverClass);
if (Result != AR_Available) {
// All other diagnostic kinds have already been handled in
// DiagnoseAvailabilityOfDecl.
@@ -7654,7 +8032,8 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
// If the context of this function is less available than D, we should not
// emit a diagnostic.
- if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx))
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx,
+ OffendingDecl))
return;
// We would like to emit the diagnostic even if -Wunguarded-availability is
@@ -7668,21 +8047,24 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
? diag::warn_unguarded_availability_new
: diag::warn_unguarded_availability;
+ std::string PlatformName = AvailabilityAttr::getPrettyPlatformName(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName());
+
SemaRef.Diag(Range.getBegin(), DiagKind)
- << Range << D
- << AvailabilityAttr::getPrettyPlatformName(
- SemaRef.getASTContext().getTargetInfo().getPlatformName())
- << Introduced.getAsString();
+ << Range << D << PlatformName << Introduced.getAsString();
SemaRef.Diag(OffendingDecl->getLocation(),
- diag::note_availability_specified_here)
- << OffendingDecl << /* partial */ 3;
+ diag::note_partial_availability_specified_here)
+ << OffendingDecl << PlatformName << Introduced.getAsString()
+ << SemaRef.Context.getTargetInfo()
+ .getPlatformMinVersion()
+ .getAsString();
auto FixitDiag =
SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
<< Range << D
- << (SemaRef.getLangOpts().ObjC1 ? /*@available*/ 0
- : /*__builtin_available*/ 1);
+ << (SemaRef.getLangOpts().ObjC ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
// Find the statement which should be enclosed in the if @available check.
if (StmtStack.empty())
@@ -7714,10 +8096,10 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
const SourceManager &SM = SemaRef.getSourceManager();
SourceLocation IfInsertionLoc =
- SM.getExpansionLoc(StmtOfUse->getLocStart());
+ SM.getExpansionLoc(StmtOfUse->getBeginLoc());
SourceLocation StmtEndLoc =
SM.getExpansionRange(
- (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getLocEnd())
+ (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getEndLoc())
.getEnd();
if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
return;
@@ -7726,8 +8108,8 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
const char *ExtraIndentation = " ";
std::string FixItString;
llvm::raw_string_ostream FixItOS(FixItString);
- FixItOS << "if (" << (SemaRef.getLangOpts().ObjC1 ? "@available"
- : "__builtin_available")
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC ? "@available"
+ : "__builtin_available")
<< "("
<< AvailabilityAttr::getPlatformNameSourceSpelling(
SemaRef.getASTContext().getTargetInfo().getPlatformName())
@@ -7820,12 +8202,14 @@ void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
- bool AvoidPartialAvailabilityChecks) {
+ bool AvoidPartialAvailabilityChecks,
+ ObjCInterfaceDecl *ClassReceiver) {
std::string Message;
AvailabilityResult Result;
const NamedDecl* OffendingDecl;
// See if this declaration is unavailable, deprecated, or partial.
- std::tie(Result, OffendingDecl) = ShouldDiagnoseAvailabilityOfDecl(D, &Message);
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(*this, D, &Message, ClassReceiver);
if (Result == AR_Available)
return;
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 4cf3abdf5745..43b289d8d0de 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -94,17 +94,17 @@ namespace {
// evaluated. Parameters of a function declared before a default
// argument expression are in scope and can hide namespace and
// class member names.
- return S->Diag(DRE->getLocStart(),
+ return S->Diag(DRE->getBeginLoc(),
diag::err_param_default_argument_references_param)
- << Param->getDeclName() << DefaultArg->getSourceRange();
+ << Param->getDeclName() << DefaultArg->getSourceRange();
} else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
// C++ [dcl.fct.default]p7
// Local variables shall not be used in default argument
// expressions.
if (VDecl->isLocalVarDecl())
- return S->Diag(DRE->getLocStart(),
+ return S->Diag(DRE->getBeginLoc(),
diag::err_param_default_argument_references_local)
- << VDecl->getDeclName() << DefaultArg->getSourceRange();
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
}
return false;
@@ -115,9 +115,9 @@ namespace {
// C++ [dcl.fct.default]p8:
// The keyword this shall not be used in a default argument of a
// member function.
- return S->Diag(ThisE->getLocStart(),
+ return S->Diag(ThisE->getBeginLoc(),
diag::err_param_default_argument_references_this)
- << ThisE->getSourceRange();
+ << ThisE->getSourceRange();
}
bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
@@ -144,8 +144,7 @@ namespace {
if (Lambda->capture_begin() == Lambda->capture_end())
return false;
- return S->Diag(Lambda->getLocStart(),
- diag::err_lambda_capture_default_arg);
+ return S->Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
}
}
@@ -1108,7 +1107,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
// [dcl.decomp]p3:
// The unqualified-id get is looked up in the scope of E by class member
- // access lookup
+ // access lookup ...
LookupResult MemberGet(S, GetDN, Src->getLocation(), Sema::LookupMemberName);
bool UseMemberGet = false;
if (S.isCompleteType(Src->getLocation(), DecompType)) {
@@ -1116,7 +1115,20 @@ static bool checkTupleLikeDecomposition(Sema &S,
S.LookupQualifiedName(MemberGet, RD);
if (MemberGet.isAmbiguous())
return true;
- UseMemberGet = !MemberGet.empty();
+ // ... and if that finds at least one declaration that is a function
+ // template whose first template parameter is a non-type parameter ...
+ for (NamedDecl *D : MemberGet) {
+ if (FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(D->getUnderlyingDecl())) {
+ TemplateParameterList *TPL = FTD->getTemplateParameters();
+ if (TPL->size() != 0 &&
+ isa<NonTypeTemplateParmDecl>(TPL->getParam(0))) {
+ // ... the initializer is e.get<i>().
+ UseMemberGet = true;
+ break;
+ }
+ }
+ }
S.FilterAcceptableTemplateNames(MemberGet);
}
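
For reference (not part of the patch), a sketch of the case the new loop selects for: the member get<i>() path is taken only when lookup finds a function template whose first template parameter is a non-type parameter.

    #include <utility>   // primary templates for std::tuple_size / std::tuple_element

    struct P {
      int a = 1, b = 2;
      template <int I> int get() const { return I == 0 ? a : b; }  // non-type first template parameter
    };
    namespace std {
      template <> struct tuple_size<P> { static constexpr size_t value = 2; };
      template <size_t I> struct tuple_element<I, P> { using type = int; };
    }
    void use(P p) {
      auto [x, y] = p;  // initializers are p.get<0>() and p.get<1>() (UseMemberGet == true)
    }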
@@ -1193,7 +1205,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
E = Seq.Perform(S, Entity, Kind, Init);
if (E.isInvalid())
return true;
- E = S.ActOnFinishFullExpr(E.get(), Loc);
+ E = S.ActOnFinishFullExpr(E.get(), Loc, /*DiscardedValue*/ false);
if (E.isInvalid())
return true;
RefVD->setInit(E.get());
@@ -1215,16 +1227,16 @@ static bool checkTupleLikeDecomposition(Sema &S,
/// Find the base class to decompose in a built-in decomposition of a class type.
/// This base class search is, unfortunately, not quite like any other that we
/// perform anywhere else in C++.
-static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
- SourceLocation Loc,
- const CXXRecordDecl *RD,
- CXXCastPath &BasePath) {
+static DeclAccessPair findDecomposableBaseClass(Sema &S, SourceLocation Loc,
+ const CXXRecordDecl *RD,
+ CXXCastPath &BasePath) {
auto BaseHasFields = [](const CXXBaseSpecifier *Specifier,
CXXBasePath &Path) {
return Specifier->getType()->getAsCXXRecordDecl()->hasDirectFields();
};
const CXXRecordDecl *ClassWithFields = nullptr;
+ AccessSpecifier AS = AS_public;
if (RD->hasDirectFields())
// [dcl.decomp]p4:
// Otherwise, all of E's non-static data members shall be public direct
@@ -1237,7 +1249,7 @@ static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
if (!RD->lookupInBases(BaseHasFields, Paths)) {
// If no classes have fields, just decompose RD itself. (This will work
// if and only if zero bindings were provided.)
- return RD;
+ return DeclAccessPair::make(const_cast<CXXRecordDecl*>(RD), AS_public);
}
CXXBasePath *BestPath = nullptr;
@@ -1250,7 +1262,7 @@ static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
S.Diag(Loc, diag::err_decomp_decl_multiple_bases_with_members)
<< false << RD << BestPath->back().Base->getType()
<< P.back().Base->getType();
- return nullptr;
+ return DeclAccessPair();
} else if (P.Access < BestPath->Access) {
BestPath = &P;
}
@@ -1261,23 +1273,13 @@ static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
if (Paths.isAmbiguous(S.Context.getCanonicalType(BaseType))) {
S.Diag(Loc, diag::err_decomp_decl_ambiguous_base)
<< RD << BaseType << S.getAmbiguousPathsDisplayString(Paths);
- return nullptr;
+ return DeclAccessPair();
}
- // ... public base class of E.
- if (BestPath->Access != AS_public) {
- S.Diag(Loc, diag::err_decomp_decl_non_public_base)
- << RD << BaseType;
- for (auto &BS : *BestPath) {
- if (BS.Base->getAccessSpecifier() != AS_public) {
- S.Diag(BS.Base->getLocStart(), diag::note_access_constrained_by_path)
- << (BS.Base->getAccessSpecifier() == AS_protected)
- << (BS.Base->getAccessSpecifierAsWritten() == AS_none);
- break;
- }
- }
- return nullptr;
- }
+ // ... [accessible, implied by other rules] base class of E.
+ S.CheckBaseClassAccess(Loc, BaseType, S.Context.getRecordType(RD),
+ *BestPath, diag::err_decomp_decl_inaccessible_base);
+ AS = BestPath->Access;
ClassWithFields = BaseType->getAsCXXRecordDecl();
S.BuildBasePathArray(Paths, BasePath);
@@ -1290,17 +1292,19 @@ static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
S.Diag(Loc, diag::err_decomp_decl_multiple_bases_with_members)
<< (ClassWithFields == RD) << RD << ClassWithFields
<< Paths.front().back().Base->getType();
- return nullptr;
+ return DeclAccessPair();
}
- return ClassWithFields;
+ return DeclAccessPair::make(const_cast<CXXRecordDecl*>(ClassWithFields), AS);
}
static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
ValueDecl *Src, QualType DecompType,
- const CXXRecordDecl *RD) {
+ const CXXRecordDecl *OrigRD) {
CXXCastPath BasePath;
- RD = findDecomposableBaseClass(S, Src->getLocation(), RD, BasePath);
+ DeclAccessPair BasePair =
+ findDecomposableBaseClass(S, Src->getLocation(), OrigRD, BasePath);
+ const CXXRecordDecl *RD = cast_or_null<CXXRecordDecl>(BasePair.getDecl());
if (!RD)
return true;
QualType BaseType = S.Context.getQualifiedType(S.Context.getRecordType(RD),
@@ -1317,7 +1321,8 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
return true;
};
- // all of E's non-static data members shall be public [...] members,
+ // all of E's non-static data members shall be [...] well-formed
+ // when named as e.name in the context of the structured binding,
// E shall not have an anonymous union member, ...
unsigned I = 0;
for (auto *FD : RD->fields()) {
@@ -1335,26 +1340,16 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
if (I >= Bindings.size())
return DiagnoseBadNumberOfBindings();
auto *B = Bindings[I++];
-
SourceLocation Loc = B->getLocation();
- if (FD->getAccess() != AS_public) {
- S.Diag(Loc, diag::err_decomp_decl_non_public_member) << FD << DecompType;
- // Determine whether the access specifier was explicit.
- bool Implicit = true;
- for (const auto *D : RD->decls()) {
- if (declaresSameEntity(D, FD))
- break;
- if (isa<AccessSpecDecl>(D)) {
- Implicit = false;
- break;
- }
- }
-
- S.Diag(FD->getLocation(), diag::note_access_natural)
- << (FD->getAccess() == AS_protected) << Implicit;
- return true;
- }
+ // The field must be accessible in the context of the structured binding.
+ // We already checked that the base class is accessible.
+ // FIXME: Add 'const' to AccessedEntity's classes so we can remove the
+ // const_cast here.
+ S.CheckStructuredBindingMemberAccess(
+ Loc, const_cast<CXXRecordDecl *>(OrigRD),
+ DeclAccessPair::make(FD, CXXRecordDecl::MergeAccess(
+ BasePair.getAccess(), FD->getAccess())));
// Initialize the binding to Src.FD.
ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
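
A sketch (not from the patch) of what replacing the "public members" check with CheckStructuredBindingMemberAccess permits: decomposition now succeeds wherever the members are accessible, not only when they are public.

    class Secret {
      int x = 1, y = 2;
      friend void peek();
    };
    void peek() {
      Secret s;
      auto [a, b] = s;     // OK now: x and y are accessible from this friend
    }
    void stranger() {
      Secret s;
      // auto [a, b] = s;  // still diagnosed: the members are inaccessible here
    }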
@@ -1606,8 +1601,8 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
<< isa<CXXConstructorDecl>(NewFD)
<< getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
for (const auto &I : RD->vbases())
- Diag(I.getLocStart(),
- diag::note_constexpr_virtual_base_here) << I.getSourceRange();
+ Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here)
+ << I.getSourceRange();
return false;
}
}
@@ -1691,11 +1686,11 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
case Decl::CXXRecord:
// C++1y allows types to be defined, not just declared.
if (cast<TagDecl>(DclIt)->isThisDeclarationADefinition())
- SemaRef.Diag(DS->getLocStart(),
+ SemaRef.Diag(DS->getBeginLoc(),
SemaRef.getLangOpts().CPlusPlus14
- ? diag::warn_cxx11_compat_constexpr_type_definition
- : diag::ext_constexpr_type_definition)
- << isa<CXXConstructorDecl>(Dcl);
+ ? diag::warn_cxx11_compat_constexpr_type_definition
+ : diag::ext_constexpr_type_definition)
+ << isa<CXXConstructorDecl>(Dcl);
continue;
case Decl::EnumConstant:
@@ -1746,12 +1741,12 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
// These are disallowed in C++11 and permitted in C++1y. Allow them
// everywhere as an extension.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = DS->getLocStart();
+ Cxx1yLoc = DS->getBeginLoc();
continue;
default:
- SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_body_invalid_stmt)
- << isa<CXXConstructorDecl>(Dcl);
+ SemaRef.Diag(DS->getBeginLoc(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
return false;
}
}
@@ -1808,7 +1803,7 @@ static void CheckConstexprCtorInitializer(Sema &SemaRef,
static bool
CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
SmallVectorImpl<SourceLocation> &ReturnStmts,
- SourceLocation &Cxx1yLoc) {
+ SourceLocation &Cxx1yLoc, SourceLocation &Cxx2aLoc) {
// - its function-body shall be [...] a compound-statement that contains only
switch (S->getStmtClass()) {
case Stmt::NullStmtClass:
@@ -1830,22 +1825,22 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
if (isa<CXXConstructorDecl>(Dcl)) {
// C++1y allows return statements in constexpr constructors.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
return true;
}
- ReturnStmts.push_back(S->getLocStart());
+ ReturnStmts.push_back(S->getBeginLoc());
return true;
case Stmt::CompoundStmtClass: {
// C++1y allows compound-statements.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
CompoundStmt *CompStmt = cast<CompoundStmt>(S);
for (auto *BodyIt : CompStmt->body()) {
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
}
return true;
@@ -1853,21 +1848,21 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
case Stmt::AttributedStmtClass:
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
return true;
case Stmt::IfStmtClass: {
// C++1y allows if-statements.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
IfStmt *If = cast<IfStmt>(S);
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
if (If->getElse() &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
return true;
}
@@ -1882,11 +1877,11 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
if (!SemaRef.getLangOpts().CPlusPlus14)
break;
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
for (Stmt *SubStmt : S->children())
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
return false;
return true;
@@ -1897,12 +1892,32 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
// C++1y allows switch-statements, and since they don't need variable
// mutation, we can reasonably allow them in C++11 as an extension.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
for (Stmt *SubStmt : S->children())
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc))
+ Cxx1yLoc, Cxx2aLoc))
+ return false;
+ return true;
+
+ case Stmt::CXXTryStmtClass:
+ if (Cxx2aLoc.isInvalid())
+ Cxx2aLoc = S->getBeginLoc();
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc))
return false;
+ }
+ return true;
+
+ case Stmt::CXXCatchStmtClass:
+ // Do not bother checking the language mode (already covered by the
+ // try block check).
+ if (!CheckConstexprFunctionStmt(SemaRef, Dcl,
+ cast<CXXCatchStmt>(S)->getHandlerBlock(),
+ ReturnStmts, Cxx1yLoc, Cxx2aLoc))
+ return false;
return true;
default:
@@ -1911,12 +1926,12 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
// C++1y allows expression-statements.
if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getLocStart();
+ Cxx1yLoc = S->getBeginLoc();
return true;
}
- SemaRef.Diag(S->getLocStart(), diag::err_constexpr_body_invalid_stmt)
- << isa<CXXConstructorDecl>(Dcl);
+ SemaRef.Diag(S->getBeginLoc(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
return false;
}
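
A sketch (not part of the patch) of the C++2a behaviour wired up through Cxx2aLoc: a try-block no longer disqualifies a constexpr function, although throwing during constant evaluation is still rejected.

    constexpr int parse(int v) {
      try {                   // C++2a: OK (ext_constexpr_body_invalid_stmt_cxx2a before that)
        return v + 1;
      } catch (...) {         // handler is checked too, via the CXXCatchStmtClass case above
        return -1;
      }
    }
    static_assert(parse(1) == 2);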
@@ -1925,6 +1940,8 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
///
/// \return true if the body is OK, false if we have diagnosed a problem.
bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
+ SmallVector<SourceLocation, 4> ReturnStmts;
+
if (isa<CXXTryStmt>(Body)) {
// C++11 [dcl.constexpr]p3:
// The definition of a constexpr function shall satisfy the following
@@ -1935,22 +1952,35 @@ bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
// C++11 [dcl.constexpr]p4:
// In the definition of a constexpr constructor, [...]
// - its function-body shall not be a function-try-block;
- Diag(Body->getLocStart(), diag::err_constexpr_function_try_block)
- << isa<CXXConstructorDecl>(Dcl);
- return false;
+ //
+ // This restriction is lifted in C++2a, as long as the inner statements also
+ // obey the general constexpr rules.
+ Diag(Body->getBeginLoc(),
+ !getLangOpts().CPlusPlus2a
+ ? diag::ext_constexpr_function_try_block_cxx2a
+ : diag::warn_cxx17_compat_constexpr_function_try_block)
+ << isa<CXXConstructorDecl>(Dcl);
}
- SmallVector<SourceLocation, 4> ReturnStmts;
-
// - its function-body shall be [...] a compound-statement that contains only
// [... list of cases ...]
- CompoundStmt *CompBody = cast<CompoundStmt>(Body);
- SourceLocation Cxx1yLoc;
- for (auto *BodyIt : CompBody->body()) {
- if (!CheckConstexprFunctionStmt(*this, Dcl, BodyIt, ReturnStmts, Cxx1yLoc))
+ //
+ // Note that walking the children here is enough to properly check for
+ // CompoundStmt and CXXTryStmt body.
+ SourceLocation Cxx1yLoc, Cxx2aLoc;
+ for (Stmt *SubStmt : Body->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(*this, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc))
return false;
}
+ if (Cxx2aLoc.isValid())
+ Diag(Cxx2aLoc,
+ getLangOpts().CPlusPlus2a
+ ? diag::warn_cxx17_compat_constexpr_body_invalid_stmt
+ : diag::ext_constexpr_body_invalid_stmt_cxx2a)
+ << isa<CXXConstructorDecl>(Dcl);
if (Cxx1yLoc.isValid())
Diag(Cxx1yLoc,
getLangOpts().CPlusPlus14
@@ -2317,8 +2347,8 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
continue;
Diag(AL.getLoc(), AL.getKind() == ParsedAttr::UnknownAttribute
- ? diag::warn_unknown_attribute_ignored
- : diag::err_base_specifier_attribute)
+ ? (unsigned)diag::warn_unknown_attribute_ignored
+ : (unsigned)diag::err_base_specifier_attribute)
<< AL.getName();
}
@@ -2395,10 +2425,8 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
// C++ [class.mi]p3:
// A class shall not be specified as a direct base class of a
// derived class more than once.
- Diag(Bases[idx]->getLocStart(),
- diag::err_duplicate_base_class)
- << KnownBase->getType()
- << Bases[idx]->getSourceRange();
+ Diag(Bases[idx]->getBeginLoc(), diag::err_duplicate_base_class)
+ << KnownBase->getType() << Bases[idx]->getSourceRange();
// Delete the duplicate base class specifier; we're going to
// overwrite its pointer later.
@@ -2421,9 +2449,9 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
KnownBase->getAccessSpecifier() != AS_public)) {
// The Microsoft extension __interface does not permit bases that
// are not themselves public interfaces.
- Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface)
- << getRecordDiagFromTagKind(RD->getTagKind()) << RD
- << RD->getSourceRange();
+ Diag(KnownBase->getBeginLoc(), diag::err_invalid_base_in_interface)
+ << getRecordDiagFromTagKind(RD->getTagKind()) << RD
+ << RD->getSourceRange();
Invalid = true;
}
if (RD->hasAttr<WeakAttr>())
@@ -2457,9 +2485,9 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
(void)found;
if (Paths.isAmbiguous(CanonicalBase))
- Diag(Bases[idx]->getLocStart (), diag::warn_inaccessible_base_class)
- << BaseType << getAmbiguousPathsDisplayString(Paths)
- << Bases[idx]->getSourceRange();
+ Diag(Bases[idx]->getBeginLoc(), diag::warn_inaccessible_base_class)
+ << BaseType << getAmbiguousPathsDisplayString(Paths)
+ << Bases[idx]->getSourceRange();
else
assert(Bases[idx]->isVirtual());
}
@@ -2842,7 +2870,8 @@ static const ParsedAttr *getMSPropertyAttr(const ParsedAttributesView &list) {
// Check if there is a field shadowing.
void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
- const CXXRecordDecl *RD) {
+ const CXXRecordDecl *RD,
+ bool DeclIsField) {
if (Diags.isIgnored(diag::warn_shadow_field, Loc))
return;
@@ -2882,7 +2911,7 @@ void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
if (AS_none !=
CXXRecordDecl::MergeAccess(P.Access, BaseField->getAccess())) {
Diag(Loc, diag::warn_shadow_field)
- << FieldName << RD << Base;
+ << FieldName << RD << Base << DeclIsField;
Diag(BaseField->getLocation(), diag::note_shadow_field);
Bases.erase(It);
}
@@ -2906,7 +2935,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// For anonymous bitfields, the location should point to the type.
if (Loc.isInvalid())
- Loc = D.getLocStart();
+ Loc = D.getBeginLoc();
Expr *BitWidth = static_cast<Expr*>(BW);
@@ -3144,17 +3173,17 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
if (auto *DG = dyn_cast<CXXDeductionGuideDecl>(NonTemplateMember)) {
auto *TD = DG->getDeducedTemplate();
if (AS != TD->getAccess()) {
- Diag(DG->getLocStart(), diag::err_deduction_guide_wrong_access);
- Diag(TD->getLocStart(), diag::note_deduction_guide_template_access)
- << TD->getAccess();
+ Diag(DG->getBeginLoc(), diag::err_deduction_guide_wrong_access);
+ Diag(TD->getBeginLoc(), diag::note_deduction_guide_template_access)
+ << TD->getAccess();
const AccessSpecDecl *LastAccessSpec = nullptr;
for (const auto *D : cast<CXXRecordDecl>(CurContext)->decls()) {
if (const auto *AccessSpec = dyn_cast<AccessSpecDecl>(D))
LastAccessSpec = AccessSpec;
}
assert(LastAccessSpec && "differing access with no access specifier");
- Diag(LastAccessSpec->getLocStart(), diag::note_deduction_guide_access)
- << AS;
+ Diag(LastAccessSpec->getBeginLoc(), diag::note_deduction_guide_access)
+ << AS;
}
}
}
@@ -3239,7 +3268,7 @@ namespace {
ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParenImpCasts());
}
- // Binding a reference to an unintialized field is not an
+ // Binding a reference to an uninitialized field is not an
// uninitialized use.
if (CheckReferenceOnly && !ReferenceField)
return true;
@@ -3638,10 +3667,10 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
InitializedEntity::InitializeMemberFromDefaultMemberInitializer(FD);
InitializationKind Kind =
FD->getInClassInitStyle() == ICIS_ListInit
- ? InitializationKind::CreateDirectList(InitExpr->getLocStart(),
- InitExpr->getLocStart(),
- InitExpr->getLocEnd())
- : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc);
+ ? InitializationKind::CreateDirectList(InitExpr->getBeginLoc(),
+ InitExpr->getBeginLoc(),
+ InitExpr->getEndLoc())
+ : InitializationKind::CreateCopy(InitExpr->getBeginLoc(), InitLoc);
InitializationSequence Seq(*this, Entity, Kind, InitExpr);
Init = Seq.Perform(*this, Entity, Kind, InitExpr);
if (Init.isInvalid()) {
@@ -3653,7 +3682,7 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
- Init = ActOnFinishFullExpr(Init.get(), InitLoc);
+ Init = ActOnFinishFullExpr(Init.get(), InitLoc, /*DiscardedValue*/ false);
if (Init.isInvalid()) {
FD->setInvalidDecl();
return;
@@ -3737,8 +3766,7 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc) {
- Expr *List = new (Context) ParenListExpr(Context, LParenLoc,
- Args, RParenLoc);
+ Expr *List = ParenListExpr::Create(Context, LParenLoc, Args, RParenLoc);
return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
DS, IdLoc, List, EllipsisLoc);
}
@@ -3767,6 +3795,22 @@ private:
}
+ValueDecl *Sema::tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
+ CXXScopeSpec &SS,
+ ParsedType TemplateTypeTy,
+ IdentifierInfo *MemberOrBase) {
+ if (SS.getScopeRep() || TemplateTypeTy)
+ return nullptr;
+ DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase);
+ if (Result.empty())
+ return nullptr;
+ ValueDecl *Member;
+ if ((Member = dyn_cast<FieldDecl>(Result.front())) ||
+ (Member = dyn_cast<IndirectFieldDecl>(Result.front())))
+ return Member;
+ return nullptr;
+}
+
/// Handle a C++ member initializer.
MemInitResult
Sema::BuildMemInitializer(Decl *ConstructorD,
@@ -3810,21 +3854,16 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
// of a single identifier refers to the class member. A
// mem-initializer-id for the hidden base class may be specified
// using a qualified name. ]
- if (!SS.getScopeRep() && !TemplateTypeTy) {
- // Look for a member, first.
- DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase);
- if (!Result.empty()) {
- ValueDecl *Member;
- if ((Member = dyn_cast<FieldDecl>(Result.front())) ||
- (Member = dyn_cast<IndirectFieldDecl>(Result.front()))) {
- if (EllipsisLoc.isValid())
- Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
- << MemberOrBase
- << SourceRange(IdLoc, Init->getSourceRange().getEnd());
- return BuildMemberInitializer(Member, Init, IdLoc);
- }
- }
+ // Look for a member, first.
+ if (ValueDecl *Member = tryLookupCtorInitMemberDecl(
+ ClassDecl, SS, TemplateTypeTy, MemberOrBase)) {
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
+ << MemberOrBase
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+
+ return BuildMemberInitializer(Member, Init, IdLoc);
}
// It didn't name a member, so see if it names a class.
QualType BaseType;
@@ -3908,10 +3947,8 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
const CXXBaseSpecifier *BaseSpec = DirectBaseSpec ? DirectBaseSpec
: VirtualBaseSpec;
- Diag(BaseSpec->getLocStart(),
- diag::note_base_class_specified_here)
- << BaseSpec->getType()
- << BaseSpec->getSourceRange();
+ Diag(BaseSpec->getBeginLoc(), diag::note_base_class_specified_here)
+ << BaseSpec->getType() << BaseSpec->getSourceRange();
TyD = Type;
}
@@ -3990,7 +4027,7 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
nullptr);
InitializationKind Kind =
InitList ? InitializationKind::CreateDirectList(
- IdLoc, Init->getLocStart(), Init->getLocEnd())
+ IdLoc, Init->getBeginLoc(), Init->getEndLoc())
: InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
InitRange.getEnd());
@@ -4003,7 +4040,8 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
- MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin());
+ MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin(),
+ /*DiscardedValue*/ false);
if (MemberInit.isInvalid())
return true;
@@ -4043,7 +4081,7 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
QualType(ClassDecl->getTypeForDecl(), 0));
InitializationKind Kind =
InitList ? InitializationKind::CreateDirectList(
- NameLoc, Init->getLocStart(), Init->getLocEnd())
+ NameLoc, Init->getBeginLoc(), Init->getEndLoc())
: InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
InitRange.getEnd());
InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args);
@@ -4058,8 +4096,8 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
- DelegationInit = ActOnFinishFullExpr(DelegationInit.get(),
- InitRange.getBegin());
+ DelegationInit = ActOnFinishFullExpr(
+ DelegationInit.get(), InitRange.getBegin(), /*DiscardedValue*/ false);
if (DelegationInit.isInvalid())
return true;
@@ -4188,7 +4226,8 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// C++11 [class.base.init]p7:
// The initialization of each base and member constitutes a
// full-expression.
- BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin());
+ BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin(),
+ /*DiscardedValue*/ false);
if (BaseInit.isInvalid())
return true;
@@ -4214,7 +4253,7 @@ static Expr *CastForMoving(Sema &SemaRef, Expr *E, QualType T = QualType()) {
if (T.isNull()) T = E->getType();
QualType TargetType = SemaRef.BuildReferenceType(
T, /*SpelledAsLValue*/false, SourceLocation(), DeclarationName());
- SourceLocation ExprLoc = E->getLocStart();
+ SourceLocation ExprLoc = E->getBeginLoc();
TypeSourceInfo *TargetLoc = SemaRef.Context.getTrivialTypeSourceInfo(
TargetType, ExprLoc);
@@ -5171,10 +5210,9 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
assert(Dtor && "No dtor found for BaseClassDecl!");
// FIXME: caret should be on the start of the class name
- CheckDestructorAccess(Base.getLocStart(), Dtor,
+ CheckDestructorAccess(Base.getBeginLoc(), Dtor,
PDiag(diag::err_access_dtor_base)
- << Base.getType()
- << Base.getSourceRange(),
+ << Base.getType() << Base.getSourceRange(),
Context.getTypeDeclType(ClassDecl));
MarkFunctionReferenced(Location, Dtor);
@@ -5492,6 +5530,9 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
// declaration.
return;
+ if (S.Context.getTargetInfo().getTriple().isWindowsGNUEnvironment())
+ S.MarkVTableUsed(Class->getLocation(), Class, true);
+
for (Decl *Member : Class->decls()) {
// Defined static variables that are members of an exported base
// class must be marked export too.
@@ -5705,8 +5746,28 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
continue;
if (!getDLLAttr(Member)) {
- auto *NewAttr =
- cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ InheritableAttr *NewAttr = nullptr;
+
+ // Do not export/import inline function when -fno-dllexport-inlines is
+ // passed. But add attribute for later local static var check.
+ if (!getLangOpts().DllExportInlines && MD && MD->isInlined() &&
+ TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition) {
+ if (ClassExported) {
+ NewAttr = ::new (getASTContext())
+ DLLExportStaticLocalAttr(ClassAttr->getRange(),
+ getASTContext(),
+ ClassAttr->getSpellingListIndex());
+ } else {
+ NewAttr = ::new (getASTContext())
+ DLLImportStaticLocalAttr(ClassAttr->getRange(),
+ getASTContext(),
+ ClassAttr->getSpellingListIndex());
+ }
+ } else {
+ NewAttr = cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
+ }
+
NewAttr->setInherited(true);
Member->addAttr(NewAttr);
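
A sketch (not from the patch) of the case the new branch handles; the flag name -fno-dllexport-inlines is taken from the comment above, everything else is illustrative:

    struct __declspec(dllexport) Widget {
      int next() {               // defined in-class, so inline: not exported under the flag
        static int counter = 0;  // but DLLExportStaticLocalAttr keeps this static local exported,
        return ++counter;        // so all users of the DLL share one copy
      }
    };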
@@ -5825,6 +5886,9 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
if (D->isDependentType() || D->isInvalidDecl())
return false;
+ if (D->hasAttr<TrivialABIAttr>())
+ return true;
+
// Clang <= 4 used the pre-C++11 rule, which ignores move operations.
// The PS4 platform ABI follows the behavior of Clang 3.2.
if (CCK == TargetInfo::CCK_ClangABI4OrPS4)
@@ -6450,20 +6514,29 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// copy operation can take a non-const reference) as an implicit
// declaration, and
// -- not have default arguments.
+ // C++2a changes the second bullet to instead delete the function if it's
+ // defaulted on its first declaration, unless it's "an assignment operator,
+ // and its return type differs or its parameter type is not a reference".
+ bool DeleteOnTypeMismatch = getLangOpts().CPlusPlus2a && First;
+ bool ShouldDeleteForTypeMismatch = false;
unsigned ExpectedParams = 1;
if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
ExpectedParams = 0;
if (MD->getNumParams() != ExpectedParams) {
- // This also checks for default arguments: a copy or move constructor with a
+ // This checks for default arguments: a copy or move constructor with a
// default argument is classified as a default constructor, and assignment
// operations and destructors can't have default arguments.
Diag(MD->getLocation(), diag::err_defaulted_special_member_params)
<< CSM << MD->getSourceRange();
HadError = true;
} else if (MD->isVariadic()) {
- Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic)
- << CSM << MD->getSourceRange();
- HadError = true;
+ if (DeleteOnTypeMismatch)
+ ShouldDeleteForTypeMismatch = true;
+ else {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic)
+ << CSM << MD->getSourceRange();
+ HadError = true;
+ }
}
const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();
@@ -6478,8 +6551,11 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
// Check for return type matching.
ReturnType = Type->getReturnType();
- QualType ExpectedReturnType =
- Context.getLValueReferenceType(Context.getTypeDeclType(RD));
+
+ QualType DeclType = Context.getTypeDeclType(RD);
+ DeclType = Context.getAddrSpaceQualType(DeclType, MD->getTypeQualifiers().getAddressSpace());
+ QualType ExpectedReturnType = Context.getLValueReferenceType(DeclType);
+
if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type)
<< (CSM == CXXMoveAssignment) << ExpectedReturnType;
@@ -6487,10 +6563,14 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
}
// A defaulted special member cannot have cv-qualifiers.
- if (Type->getTypeQuals()) {
- Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
- << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
- HadError = true;
+ if (Type->getTypeQuals().hasConst() || Type->getTypeQuals().hasVolatile()) {
+ if (DeleteOnTypeMismatch)
+ ShouldDeleteForTypeMismatch = true;
+ else {
+ Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
+ << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
+ HadError = true;
+ }
}
}
@@ -6503,23 +6583,30 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
HasConstParam = ReferentType.isConstQualified();
if (ReferentType.isVolatileQualified()) {
- Diag(MD->getLocation(),
- diag::err_defaulted_special_member_volatile_param) << CSM;
- HadError = true;
+ if (DeleteOnTypeMismatch)
+ ShouldDeleteForTypeMismatch = true;
+ else {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_volatile_param) << CSM;
+ HadError = true;
+ }
}
if (HasConstParam && !CanHaveConstParam) {
- if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
+ if (DeleteOnTypeMismatch)
+ ShouldDeleteForTypeMismatch = true;
+ else if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
Diag(MD->getLocation(),
diag::err_defaulted_special_member_copy_const_param)
<< (CSM == CXXCopyAssignment);
// FIXME: Explain why this special member can't be const.
+ HadError = true;
} else {
Diag(MD->getLocation(),
diag::err_defaulted_special_member_move_const_param)
<< (CSM == CXXMoveAssignment);
+ HadError = true;
}
- HadError = true;
}
} else if (ExpectedParams) {
// A copy assignment operator can take its argument by value, but a
@@ -6542,7 +6629,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
: isa<CXXConstructorDecl>(MD)) &&
MD->isConstexpr() && !Constexpr &&
MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- Diag(MD->getLocStart(), diag::err_incorrect_defaulted_constexpr) << CSM;
+ Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr) << CSM;
// FIXME: Explain why the special member can't be constexpr.
HadError = true;
}
@@ -6556,7 +6643,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
// If the exception specification needs to be instantiated, do so now,
// before we clobber it with an EST_Unevaluated specification below.
if (Type->getExceptionSpecType() == EST_Uninstantiated) {
- InstantiateExceptionSpec(MD->getLocStart(), MD);
+ InstantiateExceptionSpec(MD->getBeginLoc(), MD);
Type = MD->getType()->getAs<FunctionProtoType>();
}
DelayedDefaultedMemberExceptionSpecs.push_back(std::make_pair(MD, Type));
@@ -6581,14 +6668,27 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
EPI));
}
- if (ShouldDeleteSpecialMember(MD, CSM)) {
+ if (ShouldDeleteForTypeMismatch || ShouldDeleteSpecialMember(MD, CSM)) {
if (First) {
SetDeclDeleted(MD, MD->getLocation());
+ if (!inTemplateInstantiation() && !HadError) {
+ Diag(MD->getLocation(), diag::warn_defaulted_method_deleted) << CSM;
+ if (ShouldDeleteForTypeMismatch) {
+ Diag(MD->getLocation(), diag::note_deleted_type_mismatch) << CSM;
+ } else {
+ ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/true);
+ }
+ }
+ if (ShouldDeleteForTypeMismatch && !HadError) {
+ Diag(MD->getLocation(),
+ diag::warn_cxx17_compat_defaulted_method_type_mismatch) << CSM;
+ }
} else {
// C++11 [dcl.fct.def.default]p4:
// [For a] user-provided explicitly-defaulted function [...] if such a
// function is implicitly defined as deleted, the program is ill-formed.
Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
+ assert(!ShouldDeleteForTypeMismatch && "deleted non-first decl");
ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/true);
HadError = true;
}
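The DeleteOnTypeMismatch / ShouldDeleteForTypeMismatch handling above implements the C++2a rule (P0641R2): a special member defaulted on its first declaration whose type does not match the implied one is defined as deleted rather than making the program ill-formed, except for an assignment operator with a mismatched return type or a non-reference parameter. A minimal sketch of the user-visible effect (diagnostic text paraphrased):

struct A {
  // C++17: ill-formed; C++2a: defined as deleted, with the new
  // warn_defaulted_method_deleted warning emitted by the code above.
  A(volatile A &) = default;
};

struct B {
  // Still ill-formed even in C++2a: an assignment operator whose parameter
  // is not a reference stays outside the "define as deleted" carve-out.
  B &operator=(B) = default;
};

void use(A &a) {
  A copy(a);   // error: call to deleted copy constructor of 'A'
}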
@@ -6628,20 +6728,27 @@ void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
}
void Sema::CheckDelayedMemberExceptionSpecs() {
- decltype(DelayedExceptionSpecChecks) Checks;
- decltype(DelayedDefaultedMemberExceptionSpecs) Specs;
+ decltype(DelayedOverridingExceptionSpecChecks) Overriding;
+ decltype(DelayedEquivalentExceptionSpecChecks) Equivalent;
+ decltype(DelayedDefaultedMemberExceptionSpecs) Defaulted;
- std::swap(Checks, DelayedExceptionSpecChecks);
- std::swap(Specs, DelayedDefaultedMemberExceptionSpecs);
+ std::swap(Overriding, DelayedOverridingExceptionSpecChecks);
+ std::swap(Equivalent, DelayedEquivalentExceptionSpecChecks);
+ std::swap(Defaulted, DelayedDefaultedMemberExceptionSpecs);
// Perform any deferred checking of exception specifications for virtual
// destructors.
- for (auto &Check : Checks)
+ for (auto &Check : Overriding)
CheckOverridingFunctionExceptionSpec(Check.first, Check.second);
+ // Perform any deferred checking of exception specifications for befriended
+ // special members.
+ for (auto &Check : Equivalent)
+ CheckEquivalentExceptionSpec(Check.second, Check.first);
+
// Check that any explicitly-defaulted methods have exception specifications
// compatible with their implicit exception specifications.
- for (auto &Spec : Specs)
+ for (auto &Spec : Defaulted)
CheckExplicitlyDefaultedMemberExceptionSpec(Spec.first, Spec.second);
}
@@ -6858,10 +6965,10 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
<< Field << DiagKind << IsDtorCallInCtor;
} else {
CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
- S.Diag(Base->getLocStart(),
+ S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << MD->getParent() << /*IsField*/false
- << Base->getType() << DiagKind << IsDtorCallInCtor;
+ << getEffectiveCSM() << MD->getParent() << /*IsField*/ false
+ << Base->getType() << DiagKind << IsDtorCallInCtor;
}
if (DiagKind == 1)
@@ -6930,10 +7037,10 @@ bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
// FIXME: Check that the base has a usable destructor! Sink this into
// shouldDeleteForClassSubobject.
if (BaseCtor->isDeleted() && Diagnose) {
- S.Diag(Base->getLocStart(),
+ S.Diag(Base->getBeginLoc(),
diag::note_deleted_special_member_class_subobject)
- << getEffectiveCSM() << MD->getParent() << /*IsField*/false
- << Base->getType() << /*Deleted*/1 << /*IsDtorCallInCtor*/false;
+ << getEffectiveCSM() << MD->getParent() << /*IsField*/ false
+ << Base->getType() << /*Deleted*/ 1 << /*IsDtorCallInCtor*/ false;
S.NoteDeletedFunction(BaseCtor);
}
return BaseCtor->isDeleted();
@@ -7080,7 +7187,8 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
// The closure type associated with a lambda-expression has a
// deleted (8.4.3) default constructor and a deleted copy
// assignment operator.
- if (RD->isLambda() &&
+ // C++2a adds back these operators if the lambda has no capture-default.
+ if (RD->isLambda() && !RD->lambdaIsDefaultConstructibleAndAssignable() &&
(CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
if (Diagnose)
Diag(RD->getLocation(), diag::note_lambda_decl);
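The lambdaIsDefaultConstructibleAndAssignable() exception above corresponds to C++2a P0624R2: a closure type with no lambda-capture gets a defaulted default constructor and defaulted copy/move assignment operators, while capturing lambdas keep the deleted ones. Illustrative only:

void demo() {
  auto lam = [] { return 42; };   // no lambda-capture
  decltype(lam) other;            // C++17: error (deleted default constructor);
                                  // C++2a: OK
  other = lam;                    // C++17: error (deleted copy assignment);
                                  // C++2a: OK

  auto cap = [&lam] { return lam(); };
  decltype(cap) bad;              // still an error: a capturing closure type
                                  // keeps its deleted default constructor
}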
@@ -7180,8 +7288,17 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
if (getLangOpts().CUDA) {
// We should delete the special member in CUDA mode if target inference
// failed.
- return inferCUDATargetForImplicitSpecialMember(RD, CSM, MD, SMI.ConstArg,
- Diagnose);
+ // For inherited constructors (non-null ICI), CSM may be passed so that MD
+ // is treated as certain special member, which may not reflect what special
+ // member MD really is. However inferCUDATargetForImplicitSpecialMember
+ // expects CSM to match MD, therefore recalculate CSM.
+ assert(ICI || CSM == getSpecialMember(MD));
+ auto RealCSM = CSM;
+ if (ICI)
+ RealCSM = getSpecialMember(MD);
+
+ return inferCUDATargetForImplicitSpecialMember(RD, RealCSM, MD,
+ SMI.ConstArg, Diagnose);
}
return false;
@@ -7544,7 +7661,7 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// -- all the direct base classes have trivial [default constructors or
// destructors]
for (const auto &BI : RD->bases())
- if (!checkTrivialSubobjectCall(*this, BI.getLocStart(), BI.getType(),
+ if (!checkTrivialSubobjectCall(*this, BI.getBeginLoc(), BI.getType(),
ConstArg, CSM, TSK_BaseClass, TAH, Diagnose))
return false;
@@ -7584,14 +7701,14 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
// member in all bases is trivial, so vbases must all be direct.
CXXBaseSpecifier &BS = *RD->vbases_begin();
assert(BS.isVirtual());
- Diag(BS.getLocStart(), diag::note_nontrivial_has_virtual) << RD << 1;
+ Diag(BS.getBeginLoc(), diag::note_nontrivial_has_virtual) << RD << 1;
return false;
}
// Must have a virtual method.
for (const auto *MI : RD->methods()) {
if (MI->isVirtual()) {
- SourceLocation MLoc = MI->getLocStart();
+ SourceLocation MLoc = MI->getBeginLoc();
Diag(MLoc, diag::note_nontrivial_has_virtual) << RD << 0;
return false;
}
@@ -7612,7 +7729,7 @@ struct FindHiddenVirtualMethod {
SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
private:
- /// Check whether any most overriden method from MD in Methods
+ /// Check whether any most overridden method from MD in Methods
static bool CheckMostOverridenMethods(
const CXXMethodDecl *MD,
const llvm::SmallPtrSetImpl<const CXXMethodDecl *> &Methods) {
@@ -7696,7 +7813,7 @@ void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
FHVM.Method = MD;
FHVM.S = this;
- // Keep the base methods that were overriden or introduced in the subclass
+ // Keep the base methods that were overridden or introduced in the subclass
// by 'using' in a set. A base method not in this set is hidden.
CXXRecordDecl *DC = MD->getParent();
DeclContext::lookup_result R = DC->lookup(MD->getDeclName());
@@ -8063,16 +8180,12 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
}
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- if (FTI.TypeQuals != 0) {
- if (FTI.TypeQuals & Qualifiers::Const)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
- << "const" << SourceRange(D.getIdentifierLoc());
- if (FTI.TypeQuals & Qualifiers::Volatile)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
- << "volatile" << SourceRange(D.getIdentifierLoc());
- if (FTI.TypeQuals & Qualifiers::Restrict)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
- << "restrict" << SourceRange(D.getIdentifierLoc());
+ if (FTI.hasMethodTypeQualifiers()) {
+ FTI.MethodQualifiers->forEachQualifier(
+ [&](DeclSpec::TQ TypeQual, StringRef QualName, SourceLocation SL) {
+ Diag(SL, diag::err_invalid_qualified_constructor)
+ << QualName << SourceRange(SL);
+ });
D.setInvalidType();
}
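The forEachQualifier rewrite above (and the matching destructor hunk further down) only generalizes how the per-qualifier diagnostics are emitted; the user-facing behavior is unchanged, e.g.:

struct S {
  S() const;       // error: 'const' qualifier is not allowed on a constructor
  ~S() volatile;   // error: 'volatile' qualifier is not allowed on a destructor
};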
@@ -8093,7 +8206,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
return R;
FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
- EPI.TypeQuals = 0;
+ EPI.TypeQuals = Qualifiers();
EPI.RefQualifier = RQ_None;
return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
@@ -8179,6 +8292,7 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
}
}
+ DiagnoseUseOfDecl(OperatorDelete, Loc);
MarkFunctionReferenced(Loc, OperatorDelete);
Destructor->setOperatorDelete(OperatorDelete, ThisArg);
}
@@ -8252,16 +8366,12 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
}
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- if (FTI.TypeQuals != 0 && !D.isInvalidType()) {
- if (FTI.TypeQuals & Qualifiers::Const)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
- << "const" << SourceRange(D.getIdentifierLoc());
- if (FTI.TypeQuals & Qualifiers::Volatile)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
- << "volatile" << SourceRange(D.getIdentifierLoc());
- if (FTI.TypeQuals & Qualifiers::Restrict)
- Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
- << "restrict" << SourceRange(D.getIdentifierLoc());
+ if (FTI.hasMethodTypeQualifiers() && !D.isInvalidType()) {
+ FTI.MethodQualifiers->forEachQualifier(
+ [&](DeclSpec::TQ TypeQual, StringRef QualName, SourceLocation SL) {
+ Diag(SL, diag::err_invalid_qualified_destructor)
+ << QualName << SourceRange(SL);
+ });
D.setInvalidType();
}
@@ -8299,7 +8409,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
EPI.Variadic = false;
- EPI.TypeQuals = 0;
+ EPI.TypeQuals = Qualifiers();
EPI.RefQualifier = RQ_None;
return Context.getFunctionType(Context.VoidTy, None, EPI);
}
@@ -8437,7 +8547,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// If we can provide a correct fix-it hint, do so.
if (After.isInvalid() && ConvTSI) {
SourceLocation InsertLoc =
- getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd());
+ getLocForEndOfToken(ConvTSI->getTypeLoc().getEndLoc());
DB << FixItHint::CreateInsertion(InsertLoc, " ")
<< FixItHint::CreateInsertionFromRange(
InsertLoc, CharSourceRange::getTokenRange(Before))
@@ -8627,13 +8737,13 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
if (Chunk.Kind == DeclaratorChunk::Paren)
continue;
if (Chunk.Kind != DeclaratorChunk::Function || FoundFunction) {
- Diag(D.getDeclSpec().getLocStart(),
- diag::err_deduction_guide_with_complex_decl)
- << D.getSourceRange();
+ Diag(D.getDeclSpec().getBeginLoc(),
+ diag::err_deduction_guide_with_complex_decl)
+ << D.getSourceRange();
break;
}
if (!Chunk.Fun.hasTrailingReturnType()) {
- Diag(D.getName().getLocStart(),
+ Diag(D.getName().getBeginLoc(),
diag::err_deduction_guide_no_trailing_return_type);
break;
}
@@ -8665,10 +8775,11 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
}
if (!AcceptableReturnType) {
- Diag(TSI->getTypeLoc().getLocStart(),
+ Diag(TSI->getTypeLoc().getBeginLoc(),
diag::err_deduction_guide_bad_trailing_return_type)
- << GuidedTemplate << TSI->getType() << MightInstantiateToSpecialization
- << TSI->getTypeLoc().getSourceRange();
+ << GuidedTemplate << TSI->getType()
+ << MightInstantiateToSpecialization
+ << TSI->getTypeLoc().getSourceRange();
}
// Keep going to check that we don't have any inner declarator pieces (we
@@ -9349,7 +9460,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
if (SS.isEmpty()) {
- Diag(Name.getLocStart(), diag::err_using_requires_qualname);
+ Diag(Name.getBeginLoc(), diag::err_using_requires_qualname);
return nullptr;
}
@@ -9364,24 +9475,23 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
case UnqualifiedIdKind::IK_ConstructorName:
case UnqualifiedIdKind::IK_ConstructorTemplateId:
// C++11 inheriting constructors.
- Diag(Name.getLocStart(),
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_using_decl_constructor :
- diag::err_using_decl_constructor)
- << SS.getRange();
+ Diag(Name.getBeginLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_using_decl_constructor
+ : diag::err_using_decl_constructor)
+ << SS.getRange();
if (getLangOpts().CPlusPlus11) break;
return nullptr;
case UnqualifiedIdKind::IK_DestructorName:
- Diag(Name.getLocStart(), diag::err_using_decl_destructor)
- << SS.getRange();
+ Diag(Name.getBeginLoc(), diag::err_using_decl_destructor) << SS.getRange();
return nullptr;
case UnqualifiedIdKind::IK_TemplateId:
- Diag(Name.getLocStart(), diag::err_using_decl_template_id)
- << SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
+ Diag(Name.getBeginLoc(), diag::err_using_decl_template_id)
+ << SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
return nullptr;
case UnqualifiedIdKind::IK_DeductionGuideName:
@@ -9395,10 +9505,10 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
// Warn about access declarations.
if (UsingLoc.isInvalid()) {
- Diag(Name.getLocStart(),
- getLangOpts().CPlusPlus11 ? diag::err_access_decl
- : diag::warn_access_decl_deprecated)
- << FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
+ Diag(Name.getBeginLoc(), getLangOpts().CPlusPlus11
+ ? diag::err_access_decl
+ : diag::warn_access_decl_deprecated)
+ << FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
}
if (EllipsisLoc.isInvalid()) {
@@ -10230,8 +10340,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
" = ");
} else {
// Convert 'using X::Y;' to 'typedef X::Y Y;'.
- SourceLocation InsertLoc =
- getLocForEndOfToken(NameInfo.getLocEnd());
+ SourceLocation InsertLoc = getLocForEndOfToken(NameInfo.getEndLoc());
Diag(InsertLoc, diag::note_using_decl_class_member_workaround)
<< 1 // typedef declaration
<< FixItHint::CreateReplacement(UsingLoc, "typedef")
@@ -10472,7 +10581,8 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
OldDecl->getTemplateParameters(),
/*Complain=*/true,
TPL_TemplateMatch))
- OldTemplateParams = OldDecl->getTemplateParameters();
+ OldTemplateParams =
+ OldDecl->getMostRecentDecl()->getTemplateParameters();
else
Invalid = true;
@@ -10679,19 +10789,48 @@ void SpecialMemberExceptionSpecInfo::visitSubobjectCall(
ExceptSpec.CalledDecl(getSubobjectLoc(Subobj), MD);
}
+namespace {
+/// RAII object to register a special member as being currently declared.
+struct ComputingExceptionSpec {
+ Sema &S;
+
+ ComputingExceptionSpec(Sema &S, CXXMethodDecl *MD, SourceLocation Loc)
+ : S(S) {
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::ExceptionSpecEvaluation;
+ Ctx.PointOfInstantiation = Loc;
+ Ctx.Entity = MD;
+ S.pushCodeSynthesisContext(Ctx);
+ }
+ ~ComputingExceptionSpec() {
+ S.popCodeSynthesisContext();
+ }
+};
+}
+
static Sema::ImplicitExceptionSpecification
ComputeDefaultedSpecialMemberExceptionSpec(
Sema &S, SourceLocation Loc, CXXMethodDecl *MD, Sema::CXXSpecialMember CSM,
Sema::InheritedConstructorInfo *ICI) {
+ ComputingExceptionSpec CES(S, MD, Loc);
+
CXXRecordDecl *ClassDecl = MD->getParent();
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
- SpecialMemberExceptionSpecInfo Info(S, MD, CSM, ICI, Loc);
+ SpecialMemberExceptionSpecInfo Info(S, MD, CSM, ICI, MD->getLocation());
if (ClassDecl->isInvalidDecl())
return Info.ExceptSpec;
+ // FIXME: If this diagnostic fires, we're probably missing a check for
+ // attempting to resolve an exception specification before it's known
+ // at a higher level.
+ if (S.RequireCompleteType(MD->getLocation(),
+ S.Context.getRecordType(ClassDecl),
+ diag::err_exception_spec_incomplete_type))
+ return Info.ExceptSpec;
+
// C++1z [except.spec]p7:
// [Look for exceptions thrown by] a constructor selected [...] to
// initialize a potentially constructed subobject,
@@ -10774,6 +10913,22 @@ void Sema::CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD) {
CheckFunctionDeclaration(S, FD, R, /*IsMemberSpecialization*/false);
}
+void Sema::setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
+ QualType ResultTy,
+ ArrayRef<QualType> Args) {
+ // Build an exception specification pointing back at this constructor.
+ FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, SpecialMem);
+
+ if (getLangOpts().OpenCLCPlusPlus) {
+ // OpenCL: Implicitly defaulted special member are of the generic address
+ // space.
+ EPI.TypeQuals.addAddressSpace(LangAS::opencl_generic);
+ }
+
+ auto QT = Context.getFunctionType(ResultTy, Args, EPI);
+ SpecialMem->setType(QT);
+}
+
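setupImplicitSpecialMemberType() above puts the implicit members' 'this' into the __generic address space in OpenCL C++ mode, and the DeclareImplicit* hunks below do the same for the reference parameter. A rough sketch of the intent, written in OpenCL C++ rather than ISO C++ and assuming the usual implicit conversions of named address spaces to __generic:

struct Pair { int a, b; };

__global Pair g = {1, 2};

kernel void k() {
  Pair p = g;   // private 'p' copy-constructed from a __global object;
                // works because the implicit copy constructor takes a
                // reference in the __generic address space
  p = g;        // likewise for the implicit copy assignment operator
}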
CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl) {
// C++ [class.ctor]p5:
@@ -10814,9 +10969,7 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this constructor.
- FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, DefaultCon);
- DefaultCon->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+ setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for default
// constructors is easy to compute.
@@ -10866,8 +11019,8 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
return;
}
- SourceLocation Loc = Constructor->getLocEnd().isValid()
- ? Constructor->getLocEnd()
+ SourceLocation Loc = Constructor->getEndLoc().isValid()
+ ? Constructor->getEndLoc()
: Constructor->getLocation();
Constructor->setBody(new (Context) CompoundStmt(Loc));
Constructor->markUsed(Context);
@@ -11087,9 +11240,7 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this destructor.
- FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, Destructor);
- Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+ setupImplicitSpecialMemberType(Destructor, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for
// destructors is easy to compute.
@@ -11149,8 +11300,8 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
return;
}
- SourceLocation Loc = Destructor->getLocEnd().isValid()
- ? Destructor->getLocEnd()
+ SourceLocation Loc = Destructor->getEndLoc().isValid()
+ ? Destructor->getEndLoc()
: Destructor->getLocation();
Destructor->setBody(new (Context) CompoundStmt(Loc));
Destructor->markUsed(Context);
@@ -11166,8 +11317,9 @@ void Sema::ActOnFinishCXXMemberDecls() {
// If the context is an invalid C++ class, just suppress these checks.
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(CurContext)) {
if (Record->isInvalidDecl()) {
+ DelayedOverridingExceptionSpecChecks.clear();
+ DelayedEquivalentExceptionSpecChecks.clear();
DelayedDefaultedMemberExceptionSpecs.clear();
- DelayedExceptionSpecChecks.clear();
return;
}
checkForMultipleExportedDefaultConstructors(*this, Record);
@@ -11189,11 +11341,13 @@ void Sema::referenceDLLExportedClassMethods() {
}
}
-void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
- CXXDestructorDecl *Destructor) {
+void Sema::AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor) {
assert(getLangOpts().CPlusPlus11 &&
"adjusting dtor exception specs was introduced in c++11");
+ if (Destructor->isDependentContext())
+ return;
+
// C++11 [class.dtor]p3:
// A declaration of a destructor that does not have an exception-
// specification is implicitly considered to have the same exception-
@@ -11660,6 +11814,10 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
bool Const = ClassDecl->implicitCopyAssignmentHasConstParam();
if (Const)
ArgType = ArgType.withConst();
+
+ if (Context.getLangOpts().OpenCLCPlusPlus)
+ ArgType = Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
+
ArgType = Context.getLValueReferenceType(ArgType);
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
@@ -11686,10 +11844,7 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this member.
- FunctionProtoType::ExtProtoInfo EPI =
- getImplicitMethodEPI(*this, CopyAssignment);
- CopyAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI));
+ setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
@@ -11821,8 +11976,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
}
// Our location for everything implicitly-generated.
- SourceLocation Loc = CopyAssignOperator->getLocEnd().isValid()
- ? CopyAssignOperator->getLocEnd()
+ SourceLocation Loc = CopyAssignOperator->getEndLoc().isValid()
+ ? CopyAssignOperator->getEndLoc()
: CopyAssignOperator->getLocation();
// Builds a DeclRefExpr for the "other" object.
@@ -11853,7 +12008,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Dereference "this".
DerefBuilder DerefThis(This);
CastBuilder To(DerefThis,
- Context.getCVRQualifiedType(
+ Context.getQualifiedType(
BaseType, CopyAssignOperator->getTypeQualifiers()),
VK_LValue, BasePath);
@@ -12100,14 +12255,14 @@ static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
if (Existing && Existing != &BI) {
S.Diag(CurrentLocation, diag::warn_vbase_moved_multiple_times)
<< Class << Base;
- S.Diag(Existing->getLocStart(), diag::note_vbase_moved_here)
- << (Base->getCanonicalDecl() ==
- Existing->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
- << Base << Existing->getType() << Existing->getSourceRange();
- S.Diag(BI.getLocStart(), diag::note_vbase_moved_here)
- << (Base->getCanonicalDecl() ==
- BI.getType()->getAsCXXRecordDecl()->getCanonicalDecl())
- << Base << BI.getType() << BaseSpec->getSourceRange();
+ S.Diag(Existing->getBeginLoc(), diag::note_vbase_moved_here)
+ << (Base->getCanonicalDecl() ==
+ Existing->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
+ << Base << Existing->getType() << Existing->getSourceRange();
+ S.Diag(BI.getBeginLoc(), diag::note_vbase_moved_here)
+ << (Base->getCanonicalDecl() ==
+ BI.getType()->getAsCXXRecordDecl()->getCanonicalDecl())
+ << Base << BI.getType() << BaseSpec->getSourceRange();
// Only diagnose each vbase once.
Existing = nullptr;
@@ -12173,12 +12328,10 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
QualType OtherRefType = Other->getType()->
getAs<RValueReferenceType>()->getPointeeType();
- assert(!OtherRefType.getQualifiers() &&
- "Bad argument type of defaulted move assignment");
// Our location for everything implicitly-generated.
- SourceLocation Loc = MoveAssignOperator->getLocEnd().isValid()
- ? MoveAssignOperator->getLocEnd()
+ SourceLocation Loc = MoveAssignOperator->getEndLoc().isValid()
+ ? MoveAssignOperator->getEndLoc()
: MoveAssignOperator->getLocation();
// Builds a reference to the "other" object.
@@ -12220,7 +12373,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Implicitly cast "this" to the appropriately-qualified base type.
CastBuilder To(DerefThis,
- Context.getCVRQualifiedType(
+ Context.getQualifiedType(
BaseType, MoveAssignOperator->getTypeQualifiers()),
VK_LValue, BasePath);
@@ -12356,6 +12509,10 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
bool Const = ClassDecl->implicitCopyConstructorHasConstParam();
if (Const)
ArgType = ArgType.withConst();
+
+ if (Context.getLangOpts().OpenCLCPlusPlus)
+ ArgType = Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
+
ArgType = Context.getLValueReferenceType(ArgType);
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
@@ -12384,11 +12541,7 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this member.
- FunctionProtoType::ExtProtoInfo EPI =
- getImplicitMethodEPI(*this, CopyConstructor);
- CopyConstructor->setType(
- Context.getFunctionType(Context.VoidTy, ArgType, EPI));
+ setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
@@ -12462,8 +12615,8 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
if (SetCtorInitializers(CopyConstructor, /*AnyErrors=*/false)) {
CopyConstructor->setInvalidDecl();
} else {
- SourceLocation Loc = CopyConstructor->getLocEnd().isValid()
- ? CopyConstructor->getLocEnd()
+ SourceLocation Loc = CopyConstructor->getEndLoc().isValid()
+ ? CopyConstructor->getEndLoc()
: CopyConstructor->getLocation();
Sema::CompoundScopeRAII CompoundScope(*this);
CopyConstructor->setBody(
@@ -12485,7 +12638,11 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
return nullptr;
QualType ClassType = Context.getTypeDeclType(ClassDecl);
- QualType ArgType = Context.getRValueReferenceType(ClassType);
+
+ QualType ArgType = ClassType;
+ if (Context.getLangOpts().OpenCLCPlusPlus)
+ ArgType = Context.getAddrSpaceQualType(ClassType, LangAS::opencl_generic);
+ ArgType = Context.getRValueReferenceType(ArgType);
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
CXXMoveConstructor,
@@ -12514,11 +12671,7 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this member.
- FunctionProtoType::ExtProtoInfo EPI =
- getImplicitMethodEPI(*this, MoveConstructor);
- MoveConstructor->setType(
- Context.getFunctionType(Context.VoidTy, ArgType, EPI));
+ setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
@@ -12585,8 +12738,8 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
if (SetCtorInitializers(MoveConstructor, /*AnyErrors=*/false)) {
MoveConstructor->setInvalidDecl();
} else {
- SourceLocation Loc = MoveConstructor->getLocEnd().isValid()
- ? MoveConstructor->getLocEnd()
+ SourceLocation Loc = MoveConstructor->getEndLoc().isValid()
+ ? MoveConstructor->getEndLoc()
: MoveConstructor->getLocation();
Sema::CompoundScopeRAII CompoundScope(*this);
MoveConstructor->setBody(ActOnCompoundStmt(
@@ -12889,7 +13042,7 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
Diag(Loc, diag::err_in_class_initializer_not_yet_parsed)
<< OutermostClass << Field;
- Diag(Field->getLocEnd(), diag::note_in_class_initializer_not_yet_parsed);
+ Diag(Field->getEndLoc(), diag::note_in_class_initializer_not_yet_parsed);
// Recover by marking the field invalid, unless we're in a SFINAE context.
if (!isSFINAEContext())
Field->setInvalidDecl();
@@ -12904,6 +13057,9 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (ClassDecl->hasIrrelevantDestructor()) return;
if (ClassDecl->isDependentContext()) return;
+ if (VD->isNoDestroy(getASTContext()))
+ return;
+
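The isNoDestroy() early return above is what lets the [[clang::no_destroy]] attribute skip exit-time destruction entirely: the destructor is neither registered nor odr-used for such a variable. Roughly:

struct Logger {
  ~Logger();   // non-trivial destructor
};

[[clang::no_destroy]] Logger Log;   // no atexit/__cxa_atexit registration is
                                    // emitted and ~Logger() is never run at
                                    // program exit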
CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
MarkFunctionReferenced(VD->getLocation(), Destructor);
CheckDestructorAccess(VD->getLocation(), Destructor,
@@ -13123,7 +13279,8 @@ CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) {
// C++ P0722:
// A destroying operator delete shall be a usual deallocation function.
if (MD && !MD->getParent()->isDependentContext() &&
- MD->isDestroyingOperatorDelete() && !MD->isUsualDeallocationFunction()) {
+ MD->isDestroyingOperatorDelete() &&
+ !SemaRef.isUsualDeallocationFunction(MD)) {
SemaRef.Diag(MD->getLocation(),
diag::err_destroying_operator_delete_not_usual);
return true;
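Switching to SemaRef.isUsualDeallocationFunction(MD) matters because whether a deallocation function is "usual" can depend on Sema-level state (for example, aligned-allocation availability); the P0722 rule being enforced is unchanged. A hedged sketch, declaring std::destroying_delete_t locally since library support may be missing, with diagnostic wording paraphrased:

namespace std {
  struct destroying_delete_t { explicit destroying_delete_t() = default; };
}

struct S {
  // Usual destroying deallocation function: accepted.
  void operator delete(S *p, std::destroying_delete_t);

  // The extra placement-style parameter makes this a non-usual deallocation
  // function, so it is rejected as a destroying operator delete.
  void operator delete(S *p, std::destroying_delete_t, void *cookie);
};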
@@ -13601,7 +13758,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
// Only the non-fragile NeXT runtime currently supports C++ catches
// of ObjC types, and no runtime supports catching ObjC types by value.
- if (!Invalid && getLangOpts().ObjC1) {
+ if (!Invalid && getLangOpts().ObjC) {
QualType T = ExDeclType;
if (const ReferenceType *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
@@ -13711,10 +13868,8 @@ Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
Invalid = true;
}
- VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo,
- D.getLocStart(),
- D.getIdentifierLoc(),
- D.getIdentifier());
+ VarDecl *ExDecl = BuildExceptionDeclaration(
+ S, TInfo, D.getBeginLoc(), D.getIdentifierLoc(), D.getIdentifier());
if (Invalid)
ExDecl->setInvalidDecl();
@@ -13755,6 +13910,8 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
if (Converted.isInvalid())
Failed = true;
+ else
+ Converted = ConstantExpr::Create(Context, Converted.get());
llvm::APSInt Cond;
if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
@@ -13771,9 +13928,9 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *InnerCond = nullptr;
std::string InnerCondDescription;
std::tie(InnerCond, InnerCondDescription) =
- findFailedBooleanCondition(Converted.get(),
- /*AllowTopLevelCond=*/false);
- if (InnerCond) {
+ findFailedBooleanCondition(Converted.get());
+ if (InnerCond && !isa<CXXBoolLiteralExpr>(InnerCond)
+ && !isa<IntegerLiteral>(InnerCond)) {
Diag(StaticAssertLoc, diag::err_static_assert_requirement_failed)
<< InnerCondDescription << !AssertMessage
<< Msg.str() << InnerCond->getSourceRange();
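The ConstantExpr::Create wrapping above records that the static_assert condition is evaluated as a constant expression; the literal check only affects diagnostics, suppressing the "failed requirement" note when the condition is already a bare bool or integer literal and re-quoting it would add nothing. Illustrative only:

template <typename T>
constexpr bool IsSmall = sizeof(T) <= 4;

static_assert(IsSmall<double[4]>, "too big");   // still notes the inner
                                                // 'sizeof(T) <= 4' condition
                                                // that evaluated to false

static_assert(false, "always fires");           // no redundant note repeating
                                                // that 'false' is false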
@@ -13870,7 +14027,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
// cv-qualified) class type, that class is declared as a friend; otherwise,
// the friend declaration is ignored.
return FriendDecl::Create(Context, CurContext,
- TSInfo->getTypeLoc().getLocStart(), TSInfo,
+ TSInfo->getTypeLoc().getBeginLoc(), TSInfo,
FriendLoc);
}
@@ -14012,11 +14169,34 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
/// template <> template \<class T> friend class A<int>::B;
Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TempParams) {
- SourceLocation Loc = DS.getLocStart();
+ SourceLocation Loc = DS.getBeginLoc();
assert(DS.isFriendSpecified());
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
+ // C++ [class.friend]p3:
+ // A friend declaration that does not declare a function shall have one of
+ // the following forms:
+ // friend elaborated-type-specifier ;
+ // friend simple-type-specifier ;
+ // friend typename-specifier ;
+ //
+ // Any declaration with a type qualifier does not have that form. (It's
+ // legal to specify a qualified type as a friend, you just can't write the
+ // keywords.)
+ if (DS.getTypeQualifiers()) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(), diag::err_friend_decl_spec) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(), diag::err_friend_decl_spec) << "volatile";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(DS.getRestrictSpecLoc(), diag::err_friend_decl_spec) << "restrict";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
+ Diag(DS.getAtomicSpecLoc(), diag::err_friend_decl_spec) << "_Atomic";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_unaligned)
+ Diag(DS.getUnalignedSpecLoc(), diag::err_friend_decl_spec) << "__unaligned";
+ }
+
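The qualifier check above rejects type qualifiers spelled directly in a non-function friend declaration, since none of the three permitted forms allows them; per the comment, befriending a qualified type is still possible through an alias. Roughly:

class X {};
using ConstX = const X;

class Y {
  friend const X;   // error: 'const' is invalid in this friend declaration
  friend ConstX;    // OK: no qualifier keyword appears in the declaration
};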
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
@@ -14123,8 +14303,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
CXXScopeSpec &SS = D.getCXXScopeSpec();
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
- DeclarationName Name = NameInfo.getName();
- assert(Name);
+ assert(NameInfo.getName());
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(Loc, TInfo, UPPC_FriendDeclaration) ||
@@ -14243,25 +14422,6 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
LookupQualifiedName(Previous, DC);
- // Ignore things found implicitly in the wrong scope.
- // TODO: better diagnostics for this case. Suggesting the right
- // qualified scope would be nice...
- LookupResult::Filter F = Previous.makeFilter();
- while (F.hasNext()) {
- NamedDecl *D = F.next();
- if (!DC->InEnclosingNamespaceSetOf(
- D->getDeclContext()->getRedeclContext()))
- F.erase();
- }
- F.done();
-
- if (Previous.empty()) {
- D.setInvalidType();
- Diag(Loc, diag::err_qualified_friend_not_found)
- << Name << TInfo->getType();
- return nullptr;
- }
-
// C++ [class.friend]p1: A friend of a class is a function or
// class that is not a member of the class . . .
if (DC->Equals(CurContext))
@@ -14275,6 +14435,10 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// A function can be defined in a friend declaration of a class if and
// only if the class is a non-local class (9.8), the function name is
// unqualified, and the function has namespace scope.
+ //
+ // FIXME: We should only do this if the scope specifier names the
+ // innermost enclosing namespace; otherwise the fixit changes the
+ // meaning of the code.
SemaDiagnosticBuilder DB
= Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def);
@@ -14532,8 +14696,8 @@ static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
if (!SubStmt)
continue;
if (isa<ReturnStmt>(SubStmt))
- Self.Diag(SubStmt->getLocStart(),
- diag::err_return_in_constructor_handler);
+ Self.Diag(SubStmt->getBeginLoc(),
+ diag::err_return_in_constructor_handler);
if (!isa<Expr>(SubStmt))
SearchForReturnInStmt(Self, SubStmt);
}
@@ -14834,6 +14998,15 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
if (!Class->isDynamicClass() || Class->isDependentContext() ||
CurContext->isDependentContext() || isUnevaluatedContext())
return;
+ // Do not mark as used if compiling for the device outside of the target
+ // region.
+ if (LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ !isInOpenMPDeclareTargetContext() &&
+ !isInOpenMPTargetExecutionDirective()) {
+ if (!DefinitionRequired)
+ MarkVirtualMembersReferenced(Loc, Class);
+ return;
+ }
// Try to insert this class into the map.
LoadExternalVTableUses();
@@ -15439,10 +15612,11 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
PrevDecl = nullptr;
- SourceLocation TSSL = D.getLocStart();
- const ParsedAttr::PropertyData &Data = MSPropertyAttr.getPropertyData();
- MSPropertyDecl *NewPD = MSPropertyDecl::Create(
- Context, Record, Loc, II, T, TInfo, TSSL, Data.GetterId, Data.SetterId);
+ SourceLocation TSSL = D.getBeginLoc();
+ MSPropertyDecl *NewPD =
+ MSPropertyDecl::Create(Context, Record, Loc, II, T, TInfo, TSSL,
+ MSPropertyAttr.getPropertyDataGetter(),
+ MSPropertyAttr.getPropertyDataSetter());
ProcessDeclAttributes(TUScope, NewPD, D);
NewPD->setAccess(AS);
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 595cc76cd4a3..3746bdad0358 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -363,6 +363,8 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
assert((getCurMethodDecl() == nullptr) && "Methodparsing confused");
ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+
// If we don't have a valid method decl, simply return.
if (!MDecl)
return;
@@ -653,7 +655,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
}
IDecl->setSuperClass(SuperClassTInfo);
- IDecl->setEndOfDefinitionLoc(SuperClassTInfo->getTypeLoc().getLocEnd());
+ IDecl->setEndOfDefinitionLoc(SuperClassTInfo->getTypeLoc().getEndLoc());
}
}
@@ -717,21 +719,22 @@ DeclResult Sema::actOnObjCTypeParam(Scope *S,
if (auto attr = qual.getAs<AttributedTypeLoc>()) {
rangeToRemove = attr.getLocalSourceRange();
if (attr.getTypePtr()->getImmediateNullability()) {
- Diag(attr.getLocStart(),
+ Diag(attr.getBeginLoc(),
diag::err_objc_type_param_bound_explicit_nullability)
- << paramName << typeBound
- << FixItHint::CreateRemoval(rangeToRemove);
+ << paramName << typeBound
+ << FixItHint::CreateRemoval(rangeToRemove);
diagnosed = true;
}
}
}
if (!diagnosed) {
- Diag(qual ? qual.getLocStart()
- : typeBoundInfo->getTypeLoc().getLocStart(),
- diag::err_objc_type_param_bound_qualified)
- << paramName << typeBound << typeBound.getQualifiers().getAsString()
- << FixItHint::CreateRemoval(rangeToRemove);
+ Diag(qual ? qual.getBeginLoc()
+ : typeBoundInfo->getTypeLoc().getBeginLoc(),
+ diag::err_objc_type_param_bound_qualified)
+ << paramName << typeBound
+ << typeBound.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
}
// If the type bound has qualifiers other than CVR, we need to strip
@@ -828,7 +831,7 @@ static bool checkTypeParamListConsistency(Sema &S,
if (newTypeParams->size() > prevTypeParams->size()) {
diagLoc = newTypeParams->begin()[prevTypeParams->size()]->getLocation();
} else {
- diagLoc = S.getLocForEndOfToken(newTypeParams->back()->getLocEnd());
+ diagLoc = S.getLocForEndOfToken(newTypeParams->back()->getEndLoc());
}
S.Diag(diagLoc, diag::err_objc_type_param_arity_mismatch)
@@ -865,7 +868,7 @@ static bool checkTypeParamListConsistency(Sema &S,
// Diagnose the conflict and update the second declaration.
SourceLocation diagLoc = newTypeParam->getVarianceLoc();
if (diagLoc.isInvalid())
- diagLoc = newTypeParam->getLocStart();
+ diagLoc = newTypeParam->getBeginLoc();
auto diag = S.Diag(diagLoc,
diag::err_objc_type_param_variance_conflict)
@@ -886,7 +889,7 @@ static bool checkTypeParamListConsistency(Sema &S,
: "__contravariant";
if (newTypeParam->getVariance()
== ObjCTypeParamVariance::Invariant) {
- diag << FixItHint::CreateInsertion(newTypeParam->getLocStart(),
+ diag << FixItHint::CreateInsertion(newTypeParam->getBeginLoc(),
(newVarianceStr + " ").str());
} else {
diag << FixItHint::CreateReplacement(newTypeParam->getVarianceLoc(),
@@ -2164,9 +2167,10 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
} else if (ImplIvar->isBitField() && ClsIvar->isBitField() &&
ImplIvar->getBitWidthValue(Context) !=
ClsIvar->getBitWidthValue(Context)) {
- Diag(ImplIvar->getBitWidth()->getLocStart(),
- diag::err_conflicting_ivar_bitwidth) << ImplIvar->getIdentifier();
- Diag(ClsIvar->getBitWidth()->getLocStart(),
+ Diag(ImplIvar->getBitWidth()->getBeginLoc(),
+ diag::err_conflicting_ivar_bitwidth)
+ << ImplIvar->getIdentifier();
+ Diag(ClsIvar->getBitWidth()->getBeginLoc(),
diag::note_previous_definition);
}
// Make sure the names are identical.
@@ -2206,7 +2210,7 @@ static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
}
// Issue a note to the original declaration.
- SourceLocation MethodLoc = method->getLocStart();
+ SourceLocation MethodLoc = method->getBeginLoc();
if (MethodLoc.isValid())
S.Diag(MethodLoc, diag::note_method_declared_at) << method;
}
@@ -2880,7 +2884,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
IMPDecl, PI, IncompleteImpl, false,
WarnCategoryMethodImpl);
- // FIXME. For now, we are not checking for extact match of methods
+ // FIXME. For now, we are not checking for exact match of methods
// in category implementation and its primary class's super class.
if (!WarnCategoryMethodImpl && I->getSuperClass())
MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
@@ -3580,12 +3584,12 @@ void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &
else
Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
- Diag(Methods[0]->getLocStart(),
+ Diag(Methods[0]->getBeginLoc(),
issueError ? diag::note_possibility : diag::note_using)
- << Methods[0]->getSourceRange();
+ << Methods[0]->getSourceRange();
for (unsigned I = 1, N = Methods.size(); I != N; ++I) {
- Diag(Methods[I]->getLocStart(), diag::note_also_found)
- << Methods[I]->getSourceRange();
+ Diag(Methods[I]->getBeginLoc(), diag::note_also_found)
+ << Methods[I]->getSourceRange();
}
}
}
@@ -4351,7 +4355,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
// Propagate down the 'related result type' bit from overridden methods.
if (RTC != Sema::RTC_Incompatible && overridden->hasRelatedResultType())
- ObjCMethod->SetRelatedResultType();
+ ObjCMethod->setRelatedResultType();
// Then merge the declarations.
mergeObjCMethodDecls(ObjCMethod, overridden);
@@ -4485,7 +4489,7 @@ static void checkObjCMethodX86VectorTypes(Sema &SemaRef,
QualType T;
for (const ParmVarDecl *P : Method->parameters()) {
if (P->getType()->isVectorType()) {
- Loc = P->getLocStart();
+ Loc = P->getBeginLoc();
T = P->getType();
break;
}
@@ -4746,7 +4750,7 @@ Decl *Sema::ActOnMethodDeclaration(
if (InferRelatedResultType &&
!ObjCMethod->getReturnType()->isObjCIndependentClassType())
- ObjCMethod->SetRelatedResultType();
+ ObjCMethod->setRelatedResultType();
}
if (MethodDefinition &&
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 134c76ef28c6..e0850feaffc6 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -64,7 +64,7 @@ bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
}
// Only apply this hack within a system header.
- if (!Context.getSourceManager().isInSystemHeader(D.getLocStart()))
+ if (!Context.getSourceManager().isInSystemHeader(D.getBeginLoc()))
return false;
return llvm::StringSwitch<bool>(RD->getIdentifier()->getName())
@@ -230,6 +230,16 @@ Sema::UpdateExceptionSpec(FunctionDecl *FD,
Context.adjustExceptionSpec(Redecl, ESI);
}
+static bool exceptionSpecNotKnownYet(const FunctionDecl *FD) {
+ auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (!MD)
+ return false;
+
+ auto EST = MD->getType()->castAs<FunctionProtoType>()->getExceptionSpecType();
+ return EST == EST_Unparsed ||
+ (EST == EST_Unevaluated && MD->getParent()->isBeingDefined());
+}
+
static bool CheckEquivalentExceptionSpecImpl(
Sema &S, const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
@@ -278,6 +288,14 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
ReturnValueOnError = false;
}
+ // If we're befriending a member function of a class that's currently being
+ // defined, we might not be able to work out its exception specification yet.
+ // If not, defer the check until later.
+ if (exceptionSpecNotKnownYet(Old) || exceptionSpecNotKnownYet(New)) {
+ DelayedEquivalentExceptionSpecChecks.push_back({New, Old});
+ return false;
+ }
+
// Check the types as written: they must match before any exception
// specification adjustment is applied.
if (!CheckEquivalentExceptionSpecImpl(
@@ -904,26 +922,21 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
if (New->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
EST_Unparsed)
return false;
- if (getLangOpts().CPlusPlus11 && isa<CXXDestructorDecl>(New)) {
- // Don't check uninstantiated template destructors at all. We can only
- // synthesize correct specs after the template is instantiated.
- if (New->getParent()->isDependentType())
- return false;
- if (New->getParent()->isBeingDefined()) {
- // The destructor might be updated once the definition is finished. So
- // remember it and check later.
- DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
- return false;
- }
- }
- // If the old exception specification hasn't been parsed yet, remember that
- // we need to perform this check when we get to the end of the outermost
+
+ // Don't check uninstantiated template destructors at all. We can only
+ // synthesize correct specs after the template is instantiated.
+ if (isa<CXXDestructorDecl>(New) && New->getParent()->isDependentType())
+ return false;
+
+ // If the old exception specification hasn't been parsed yet, or the new
+ // exception specification can't be computed yet, remember that we need to
+ // perform this check when we get to the end of the outermost
// lexically-surrounding class.
- if (Old->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() ==
- EST_Unparsed) {
- DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old));
+ if (exceptionSpecNotKnownYet(Old) || exceptionSpecNotKnownYet(New)) {
+ DelayedOverridingExceptionSpecChecks.push_back({New, Old});
return false;
}
+
unsigned DiagID = diag::err_override_exception_spec;
if (getLangOpts().MicrosoftExt)
DiagID = diag::ext_override_exception_spec;
@@ -992,7 +1005,7 @@ static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D) {
if (!FT)
return CT_Can;
- FT = S.ResolveExceptionSpec(E->getLocStart(), FT);
+ FT = S.ResolveExceptionSpec(E->getBeginLoc(), FT);
if (!FT)
return CT_Can;
@@ -1038,6 +1051,9 @@ CanThrowResult Sema::canThrow(const Expr *E) {
// [Can throw] if in a potentially-evaluated context the expression would
// contain:
switch (E->getStmtClass()) {
+ case Expr::ConstantExprClass:
+ return canThrow(cast<ConstantExpr>(E)->getSubExpr());
+
case Expr::CXXThrowExprClass:
// - a potentially evaluated throw-expression
return CT_Can;
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 3dc6fb151cb7..d5416d4d057c 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -26,6 +26,7 @@
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/FixedPoint.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -65,6 +66,12 @@ bool Sema::CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid) {
if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false))
return false;
+
+ // See if this is an aligned allocation/deallocation function that is
+ // unavailable.
+ if (TreatUnavailableAsInvalid &&
+ isUnavailableAlignedAllocationFunction(*FD))
+ return false;
}
// See if this function is unavailable.
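The isUnavailableAlignedAllocationFunction() check above (together with the diagnoseUnavailableAlignedAllocation() call added to DiagnoseUseOfDecl below) covers C++17 aligned new/delete being selected on a deployment target whose runtime lacks them. A hedged sketch, assuming such a target (for example an older Darwin deployment):

struct alignas(32) Wide {
  float lanes[8];
};

Wide *make() {
  return new Wide;   // selects operator new(std::size_t, std::align_val_t);
                     // with an unavailable aligned allocation function this
                     // use is now diagnosed (or the function is treated as
                     // unusable during overload resolution)
}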
@@ -114,7 +121,7 @@ void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
return NoteDeletedInheritingConstructor(Ctor);
Diag(Decl->getLocation(), diag::note_availability_specified_here)
- << Decl << true;
+ << Decl << 1;
}
/// Determine whether a FunctionDecl was ever declared with an
@@ -205,7 +212,8 @@ void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) {
bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
- bool AvoidPartialAvailabilityChecks) {
+ bool AvoidPartialAvailabilityChecks,
+ ObjCInterfaceDecl *ClassReceiver) {
SourceLocation Loc = Locs.front();
if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
// If there were any diagnostics suppressed by template argument deduction,
@@ -226,6 +234,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
// The function 'main' shall not be used within a program.
if (cast<FunctionDecl>(D)->isMain())
Diag(Loc, diag::ext_main_used);
+
+ diagnoseUnavailableAlignedAllocation(*cast<FunctionDecl>(D), Loc);
}
// See if this is an auto-typed variable whose initializer we are parsing.
@@ -264,6 +274,17 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return true;
}
+ if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ // Lambdas are only default-constructible or assignable in C++2a onwards.
+ if (MD->getParent()->isLambda() &&
+ ((isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor()) ||
+ MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())) {
+ Diag(Loc, diag::warn_cxx17_compat_lambda_def_ctor_assign)
+ << !isa<CXXConstructorDecl>(MD);
+ }
+ }
+
auto getReferencedObjCProp = [](const NamedDecl *D) ->
const ObjCPropertyDecl * {
if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
@@ -291,7 +312,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
}
DiagnoseAvailabilityOfDecl(D, Locs, UnknownObjCClass, ObjCPropertyAccess,
- AvoidPartialAvailabilityChecks);
+ AvoidPartialAvailabilityChecks, ClassReceiver);
DiagnoseUnusedOfDecl(*this, D, Loc);
@@ -386,8 +407,7 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
// or 'NULL' if those are actually defined in the context. Only use
// 'nil' for ObjC methods, where it's much more likely that the
// variadic arguments form a list of object pointers.
- SourceLocation MissingNilLoc
- = getLocForEndOfToken(sentinelExpr->getLocEnd());
+ SourceLocation MissingNilLoc = getLocForEndOfToken(sentinelExpr->getEndLoc());
std::string NullValue;
if (calleeType == CT_Method && PP.isMacroDefined("nil"))
NullValue = "nil";
@@ -501,12 +521,13 @@ static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
&S.Context.Idents.get("object_setClass"),
SourceLocation(), S.LookupOrdinaryName);
if (ObjectSetClass) {
- SourceLocation RHSLocEnd = S.getLocForEndOfToken(RHS->getLocEnd());
- S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign) <<
- FixItHint::CreateInsertion(OIRE->getLocStart(), "object_setClass(") <<
- FixItHint::CreateReplacement(SourceRange(OIRE->getOpLoc(),
- AssignLoc), ",") <<
- FixItHint::CreateInsertion(RHSLocEnd, ")");
+ SourceLocation RHSLocEnd = S.getLocForEndOfToken(RHS->getEndLoc());
+ S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign)
+ << FixItHint::CreateInsertion(OIRE->getBeginLoc(),
+ "object_setClass(")
+ << FixItHint::CreateReplacement(
+ SourceRange(OIRE->getOpLoc(), AssignLoc), ",")
+ << FixItHint::CreateInsertion(RHSLocEnd, ")");
}
else
S.Diag(OIRE->getLocation(), diag::warn_objc_isa_assign);
@@ -516,11 +537,11 @@ static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
&S.Context.Idents.get("object_getClass"),
SourceLocation(), S.LookupOrdinaryName);
if (ObjectGetClass)
- S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_use) <<
- FixItHint::CreateInsertion(OIRE->getLocStart(), "object_getClass(") <<
- FixItHint::CreateReplacement(
- SourceRange(OIRE->getOpLoc(),
- OIRE->getLocEnd()), ")");
+ S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_use)
+ << FixItHint::CreateInsertion(OIRE->getBeginLoc(),
+ "object_getClass(")
+ << FixItHint::CreateReplacement(
+ SourceRange(OIRE->getOpLoc(), OIRE->getEndLoc()), ")");
else
S.Diag(OIRE->getLocation(), diag::warn_objc_isa_use);
}
@@ -575,10 +596,10 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
&Context.Idents.get("object_getClass"),
SourceLocation(), LookupOrdinaryName);
if (ObjectGetClass)
- Diag(E->getExprLoc(), diag::warn_objc_isa_use) <<
- FixItHint::CreateInsertion(OISA->getLocStart(), "object_getClass(") <<
- FixItHint::CreateReplacement(
- SourceRange(OISA->getOpLoc(), OISA->getIsaMemberLoc()), ")");
+ Diag(E->getExprLoc(), diag::warn_objc_isa_use)
+ << FixItHint::CreateInsertion(OISA->getBeginLoc(), "object_getClass(")
+ << FixItHint::CreateReplacement(
+ SourceRange(OISA->getOpLoc(), OISA->getIsaMemberLoc()), ")");
else
Diag(E->getExprLoc(), diag::warn_objc_isa_use);
}
@@ -717,20 +738,33 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
return ExprError();
E = Res.get();
+ QualType ScalarTy = Ty;
+ unsigned NumElts = 0;
+ if (const ExtVectorType *VecTy = Ty->getAs<ExtVectorType>()) {
+ NumElts = VecTy->getNumElements();
+ ScalarTy = VecTy->getElementType();
+ }
+
// If this is a 'float' or '__fp16' (CVR qualified or typedef)
// promote to double.
// Note that default argument promotion applies only to float (and
// half/fp16); it does not apply to _Float16.
- const BuiltinType *BTy = Ty->getAs<BuiltinType>();
+ const BuiltinType *BTy = ScalarTy->getAs<BuiltinType>();
if (BTy && (BTy->getKind() == BuiltinType::Half ||
BTy->getKind() == BuiltinType::Float)) {
if (getLangOpts().OpenCL &&
!getOpenCLOptions().isEnabled("cl_khr_fp64")) {
- if (BTy->getKind() == BuiltinType::Half) {
- E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
- }
+ if (BTy->getKind() == BuiltinType::Half) {
+ QualType Ty = Context.FloatTy;
+ if (NumElts != 0)
+ Ty = Context.getExtVectorType(Ty, NumElts);
+ E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
+ }
} else {
- E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
+ QualType Ty = Context.DoubleTy;
+ if (NumElts != 0)
+ Ty = Context.getExtVectorType(Ty, NumElts);
+ E = ImpCastExprToType(E, Ty, CK_FloatingCast).get();
}
}
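The NumElts handling above extends default argument promotion to ext_vector_type values: a vector of float (or half) passed through an ellipsis is promoted element-wise to a vector of double (or to a vector of float for half when cl_khr_fp64 is unavailable in OpenCL). Sketch using the Clang ext_vector_type extension:

typedef __attribute__((ext_vector_type(4))) float float4;

void takes_varargs(int n, ...);

void call(float4 v) {
  takes_varargs(1, v);   // 'v' is promoted to a 4-element vector of double
                         // before being passed through the ellipsis
}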
@@ -819,40 +853,38 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
switch (VAK) {
case VAK_ValidInCXX11:
DiagRuntimeBehavior(
- E->getLocStart(), nullptr,
- PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
- << Ty << CT);
+ E->getBeginLoc(), nullptr,
+ PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg) << Ty << CT);
LLVM_FALLTHROUGH;
case VAK_Valid:
if (Ty->isRecordType()) {
// This is unlikely to be what the user intended. If the class has a
// 'c_str' member function, the user probably meant to call that.
- DiagRuntimeBehavior(E->getLocStart(), nullptr,
+ DiagRuntimeBehavior(E->getBeginLoc(), nullptr,
PDiag(diag::warn_pass_class_arg_to_vararg)
- << Ty << CT << hasCStrMethod(E) << ".c_str()");
+ << Ty << CT << hasCStrMethod(E) << ".c_str()");
}
break;
case VAK_Undefined:
case VAK_MSVCUndefined:
- DiagRuntimeBehavior(
- E->getLocStart(), nullptr,
- PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
- << getLangOpts().CPlusPlus11 << Ty << CT);
+ DiagRuntimeBehavior(E->getBeginLoc(), nullptr,
+ PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
+ << getLangOpts().CPlusPlus11 << Ty << CT);
break;
case VAK_Invalid:
if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct)
- Diag(E->getLocStart(),
- diag::err_cannot_pass_non_trivial_c_struct_to_vararg) << Ty << CT;
+ Diag(E->getBeginLoc(),
+ diag::err_cannot_pass_non_trivial_c_struct_to_vararg)
+ << Ty << CT;
else if (Ty->isObjCObjectType())
- DiagRuntimeBehavior(
- E->getLocStart(), nullptr,
- PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
- << Ty << CT);
+ DiagRuntimeBehavior(E->getBeginLoc(), nullptr,
+ PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
+ << Ty << CT);
else
- Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg)
- << isa<InitListExpr>(E) << Ty << CT;
+ Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg)
+ << isa<InitListExpr>(E) << Ty << CT;
break;
}
}
@@ -890,20 +922,19 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
- E->getLocStart());
+ E->getBeginLoc());
ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc,
Name, true, false);
if (TrapFn.isInvalid())
return ExprError();
- ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(),
- E->getLocStart(), None,
- E->getLocEnd());
+ ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(), E->getBeginLoc(),
+ None, E->getEndLoc());
if (Call.isInvalid())
return ExprError();
- ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma,
- Call.get(), E);
+ ExprResult Comma =
+ ActOnBinOp(TUScope, E->getBeginLoc(), tok::comma, Call.get(), E);
if (Comma.isInvalid())
return ExprError();
return Comma.get();
@@ -1436,9 +1467,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// We strip parens here because the controlling expression is typically
// parenthesized in macro definitions.
ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_multi_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType()
- << (unsigned) CompatIndices.size();
+ Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_multi_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType()
+ << (unsigned)CompatIndices.size();
for (unsigned I : CompatIndices) {
Diag(Types[I]->getTypeLoc().getBeginLoc(),
diag::note_compat_assoc)
@@ -1455,8 +1486,8 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// We strip parens here because the controlling expression is typically
// parenthesized in macro definitions.
ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_no_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType();
+ Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_no_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType();
return ExprError();
}
@@ -1549,6 +1580,32 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
CharTy = Context.UnsignedCharTy;
}
+ // Warn on initializing an array of char from a u8 string literal; this
+ // becomes ill-formed in C++2a.
+ if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus2a &&
+ !getLangOpts().Char8 && Kind == StringLiteral::UTF8) {
+ Diag(StringTokLocs.front(), diag::warn_cxx2a_compat_utf8_string);
+
+ // Create removals for all 'u8' prefixes in the string literal(s). This
+ // ensures C++2a compatibility (but may change the program behavior when
+ // built by non-Clang compilers for which the execution character set is
+ // not always UTF-8).
+ auto RemovalDiag = PDiag(diag::note_cxx2a_compat_utf8_string_remove_u8);
+ SourceLocation RemovalDiagLoc;
+ for (const Token &Tok : StringToks) {
+ if (Tok.getKind() == tok::utf8_string_literal) {
+ if (RemovalDiagLoc.isInvalid())
+ RemovalDiagLoc = Tok.getLocation();
+ RemovalDiag << FixItHint::CreateRemoval(CharSourceRange::getCharRange(
+ Tok.getLocation(),
+ Lexer::AdvanceToTokenCharacter(Tok.getLocation(), 2,
+ getSourceManager(), getLangOpts())));
+ }
+ }
+ Diag(RemovalDiagLoc, RemovalDiag);
+ }
+
+
QualType CharTyConst = CharTy;
// A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
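
Editor's note: a hedged example of the code the new -Wc++2a-compat path above flags; the snippet is assumed, not taken from the patch.

    // Built as pre-C++2a C++ without -fchar8_t:
    const char *p = u8"text";  // warned: in C++2a the literal's element type becomes
                               // char8_t, so this initialization turns ill-formed;
                               // the fix-it removes the 'u8' prefix
    const char *q = "text";    // unaffected
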
@@ -1681,7 +1738,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
if (getLangOpts().ObjCWeak && isa<VarDecl>(D) &&
Ty.getObjCLifetime() == Qualifiers::OCL_Weak && !isUnevaluatedContext() &&
- !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getLocStart()))
+ !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getBeginLoc()))
getCurFunction()->recordUseOfWeak(E);
FieldDecl *FD = dyn_cast<FieldDecl>(D);
@@ -2528,7 +2585,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
if (Method->isStatic())
return From;
- DestType = Method->getThisType(Context);
+ DestType = Method->getThisType();
DestRecordType = DestType->getPointeeType();
if (FromType->getAs<PointerType>()) {
@@ -3026,7 +3083,7 @@ static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
}
ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentType IT) {
+ PredefinedExpr::IdentKind IK) {
// Pick the current block, lambda, captured statement or function.
Decl *currentDecl = nullptr;
if (const BlockScopeInfo *BSI = getCurBlock())
@@ -3050,11 +3107,11 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
else {
// Pre-defined identifiers are of type char[x], where x is the length of
// the string.
- auto Str = PredefinedExpr::ComputeName(IT, currentDecl);
+ auto Str = PredefinedExpr::ComputeName(IK, currentDecl);
unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
- if (IT == PredefinedExpr::LFunction || IT == PredefinedExpr::LFuncSig) {
+ if (IK == PredefinedExpr::LFunction || IK == PredefinedExpr::LFuncSig) {
ResTy =
Context.adjustStringLiteralBaseType(Context.WideCharTy.withConst());
SmallString<32> RawChars;
@@ -3073,24 +3130,24 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
}
}
- return new (Context) PredefinedExpr(Loc, ResTy, IT, SL);
+ return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
- PredefinedExpr::IdentType IT;
+ PredefinedExpr::IdentKind IK;
switch (Kind) {
default: llvm_unreachable("Unknown simple primary expr!");
- case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
- case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
- case tok::kw___FUNCDNAME__: IT = PredefinedExpr::FuncDName; break; // [MS]
- case tok::kw___FUNCSIG__: IT = PredefinedExpr::FuncSig; break; // [MS]
- case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break; // [MS]
- case tok::kw_L__FUNCSIG__: IT = PredefinedExpr::LFuncSig; break; // [MS]
- case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
+ case tok::kw___func__: IK = PredefinedExpr::Func; break; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: IK = PredefinedExpr::Function; break;
+ case tok::kw___FUNCDNAME__: IK = PredefinedExpr::FuncDName; break; // [MS]
+ case tok::kw___FUNCSIG__: IK = PredefinedExpr::FuncSig; break; // [MS]
+ case tok::kw_L__FUNCTION__: IK = PredefinedExpr::LFunction; break; // [MS]
+ case tok::kw_L__FUNCSIG__: IK = PredefinedExpr::LFuncSig; break; // [MS]
+ case tok::kw___PRETTY_FUNCTION__: IK = PredefinedExpr::PrettyFunction; break;
}
- return BuildPredefinedExpr(Loc, IT);
+ return BuildPredefinedExpr(Loc, IK);
}
ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
@@ -3363,16 +3420,14 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
bool isSigned = !Literal.isUnsigned;
unsigned scale = Context.getFixedPointScale(Ty);
- unsigned ibits = Context.getFixedPointIBits(Ty);
unsigned bit_width = Context.getTypeInfo(Ty).Width;
llvm::APInt Val(bit_width, 0, isSigned);
bool Overflowed = Literal.GetFixedPointValue(Val, scale);
+ bool ValIsZero = Val.isNullValue() && !Overflowed;
- // Do not use bit_width since some types may have padding like _Fract or
- // unsigned _Accums if PaddingOnUnsignedFixedPoint is set.
- auto MaxVal = llvm::APInt::getMaxValue(ibits + scale).zextOrSelf(bit_width);
- if (Literal.isFract && Val == MaxVal + 1)
+ auto MaxVal = Context.getFixedPointMax(Ty).getValue();
+ if (Literal.isFract && Val == MaxVal + 1 && !ValIsZero)
// Clause 6.4.4 - The value of a constant shall be in the range of
// representable values for its type, with exception for constants of a
// fract type with a value of exactly 1; such a constant shall denote
@@ -3588,7 +3643,8 @@ static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
// C99 6.5.3.4p1:
if (T->isFunctionType() &&
- (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf)) {
+ (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf ||
+ TraitKind == UETT_PreferredAlignOf)) {
// sizeof(function)/alignof(function) is allowed as an extension.
S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
<< TraitKind << ArgRange;
@@ -3666,7 +3722,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// the expression to be complete. 'sizeof' requires the expression's type to
// be complete (and will attempt to complete it if it's an array of unknown
// bound).
- if (ExprKind == UETT_AlignOf) {
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
if (RequireCompleteType(E->getExprLoc(),
Context.getBaseElementType(E->getType()),
diag::err_sizeof_alignof_incomplete_type, ExprKind,
@@ -3690,7 +3746,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// The operand for sizeof and alignof is in an unevaluated expression context,
// so side effects could result in unintended consequences.
- if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf) &&
+ if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf ||
+ ExprKind == UETT_PreferredAlignOf) &&
!inTemplateInstantiation() && E->HasSideEffects(Context, false))
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
@@ -3759,7 +3816,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
// C11 6.5.3.4/3, C++11 [expr.alignof]p3:
// When alignof or _Alignof is applied to an array type, the result
// is the alignment of the element type.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_OpenMPRequiredSimdAlign)
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
+ ExprKind == UETT_OpenMPRequiredSimdAlign)
ExprType = Context.getBaseElementType(ExprType);
if (ExprKind == UETT_VecStep)
@@ -3788,7 +3846,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
return false;
}
-static bool CheckAlignOfExpr(Sema &S, Expr *E) {
+static bool CheckAlignOfExpr(Sema &S, Expr *E, UnaryExprOrTypeTrait ExprKind) {
E = E->IgnoreParens();
// Cannot know anything else if the expression is dependent.
@@ -3842,7 +3900,7 @@ static bool CheckAlignOfExpr(Sema &S, Expr *E) {
return false;
}
- return S.CheckUnaryExprOrTypeTraitOperand(E, UETT_AlignOf);
+ return S.CheckUnaryExprOrTypeTraitOperand(E, ExprKind);
}
bool Sema::CheckVecStepExpr(Expr *E) {
@@ -4038,8 +4096,8 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
bool isInvalid = false;
if (E->isTypeDependent()) {
// Delay type-checking for type-dependent expressions.
- } else if (ExprKind == UETT_AlignOf) {
- isInvalid = CheckAlignOfExpr(*this, E);
+ } else if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf) {
+ isInvalid = CheckAlignOfExpr(*this, E, ExprKind);
} else if (ExprKind == UETT_VecStep) {
isInvalid = CheckVecStepExpr(E);
} else if (ExprKind == UETT_OpenMPRequiredSimdAlign) {
@@ -4238,7 +4296,57 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx);
}
- return CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+ ExprResult Res = CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+
+ if (!Res.isInvalid() && isa<ArraySubscriptExpr>(Res.get()))
+ CheckSubscriptAccessOfNoDeref(cast<ArraySubscriptExpr>(Res.get()));
+
+ return Res;
+}
+
+void Sema::CheckAddressOfNoDeref(const Expr *E) {
+ ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
+ const Expr *StrippedExpr = E->IgnoreParenImpCasts();
+
+ // For expressions like `&(*s).b`, the base is recorded and what should be
+ // checked.
+ const MemberExpr *Member = nullptr;
+ while ((Member = dyn_cast<MemberExpr>(StrippedExpr)) && !Member->isArrow())
+ StrippedExpr = Member->getBase()->IgnoreParenImpCasts();
+
+ LastRecord.PossibleDerefs.erase(StrippedExpr);
+}
+
+void Sema::CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E) {
+ QualType ResultTy = E->getType();
+ ExpressionEvaluationContextRecord &LastRecord = ExprEvalContexts.back();
+
+ // Bail if the element is an array, since subscripting it does not itself
+ // access memory.
+ if (isa<ArrayType>(ResultTy))
+ return;
+
+ if (ResultTy->hasAttr(attr::NoDeref)) {
+ LastRecord.PossibleDerefs.insert(E);
+ return;
+ }
+
+ // Check if the base type is a pointer to a member access of a struct
+ // marked with noderef.
+ const Expr *Base = E->getBase();
+ QualType BaseTy = Base->getType();
+ if (!(isa<ArrayType>(BaseTy) || isa<PointerType>(BaseTy)))
+ // Not a pointer access
+ return;
+
+ const MemberExpr *Member = nullptr;
+ while ((Member = dyn_cast<MemberExpr>(Base->IgnoreParenCasts())) &&
+ Member->isArrow())
+ Base = Member->getBase();
+
+ if (const auto *Ptr = dyn_cast<PointerType>(Base->getType())) {
+ if (Ptr->getPointeeType()->hasAttr(attr::NoDeref))
+ LastRecord.PossibleDerefs.insert(E);
+ }
}
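
Editor's note: a hedged sketch of the kind of code the noderef bookkeeping above tracks. The attribute spelling is Clang's noderef extension; the names and exact diagnostics are assumptions for illustration.

    #define NODEREF __attribute__((noderef))

    struct Packet { int header; };

    void demo(int NODEREF *ring, struct Packet NODEREF *pkt) {
      int x = ring[3];        // expected warning: subscripting dereferences a noderef pointer
      int *addr = &ring[3];   // OK: taking the address is not a dereference
      int h = pkt->header;    // expected warning: member access through a noderef pointer
      (void)x; (void)addr; (void)h;
    }
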
ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
@@ -4339,10 +4447,11 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
return ExprError();
if (LowerBound && !OriginalTy->isAnyPointerType()) {
- llvm::APSInt LowerBoundValue;
- if (LowerBound->EvaluateAsInt(LowerBoundValue, Context)) {
+ Expr::EvalResult Result;
+ if (LowerBound->EvaluateAsInt(Result, Context)) {
// OpenMP 4.5, [2.4 Array Sections]
// The array section must be a subset of the original array.
+ llvm::APSInt LowerBoundValue = Result.Val.getInt();
if (LowerBoundValue.isNegative()) {
Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array)
<< LowerBound->getSourceRange();
@@ -4352,10 +4461,11 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
}
if (Length) {
- llvm::APSInt LengthValue;
- if (Length->EvaluateAsInt(LengthValue, Context)) {
+ Expr::EvalResult Result;
+ if (Length->EvaluateAsInt(Result, Context)) {
// OpenMP 4.5, [2.4 Array Sections]
// The length must evaluate to non-negative integers.
+ llvm::APSInt LengthValue = Result.Val.getInt();
if (LengthValue.isNegative()) {
Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
<< LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
@@ -4488,8 +4598,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// wasn't promoted because of the C90 rule that doesn't
// allow promoting non-lvalue arrays. Warn, then
// force the promotion here.
- Diag(LHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
- LHSExp->getSourceRange();
+ Diag(LHSExp->getBeginLoc(), diag::ext_subscript_non_lvalue)
+ << LHSExp->getSourceRange();
LHSExp = ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy),
CK_ArrayToPointerDecay).get();
LHSTy = LHSExp->getType();
@@ -4499,8 +4609,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
ResultType = LHSTy->getAs<PointerType>()->getPointeeType();
} else if (RHSTy->isArrayType()) {
// Same as previous, except for 123[f().a] case
- Diag(RHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
- RHSExp->getSourceRange();
+ Diag(RHSExp->getBeginLoc(), diag::ext_subscript_non_lvalue)
+ << RHSExp->getSourceRange();
RHSExp = ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy),
CK_ArrayToPointerDecay).get();
RHSTy = RHSExp->getType();
@@ -4527,8 +4637,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// type. Note that Functions are not objects, and that (in C99 parlance)
// incomplete types are not object types.
if (ResultType->isFunctionType()) {
- Diag(BaseExpr->getLocStart(), diag::err_subscript_function_type)
- << ResultType << BaseExpr->getSourceRange();
+ Diag(BaseExpr->getBeginLoc(), diag::err_subscript_function_type)
+ << ResultType << BaseExpr->getSourceRange();
return ExprError();
}
@@ -4594,7 +4704,7 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
if (Inst.isInvalid())
return true;
if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getLocStart(), diag::err_recursive_default_argument) << FD;
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
Param->setInvalidDecl();
return true;
}
@@ -4616,9 +4726,9 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
// Check the expression as an initializer for the parameter.
InitializedEntity Entity
= InitializedEntity::InitializeParameter(Context, Param);
- InitializationKind Kind
- = InitializationKind::CreateCopy(Param->getLocation(),
- /*FIXME:EqualLoc*/UninstExpr->getLocStart());
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Param->getLocation(),
+ /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
Expr *ResultE = Result.getAs<Expr>();
InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
@@ -4626,8 +4736,9 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
if (Result.isInvalid())
return true;
- Result = ActOnFinishFullExpr(Result.getAs<Expr>(),
- Param->getOuterLocStart());
+ Result =
+ ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
+ /*DiscardedValue*/ false);
if (Result.isInvalid())
return true;
@@ -4640,7 +4751,7 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
// If the default argument expression is not set yet, we are building it now.
if (!Param->hasInit()) {
- Diag(Param->getLocStart(), diag::err_recursive_default_argument) << FD;
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
Param->setInvalidDecl();
return true;
}
@@ -4725,7 +4836,7 @@ static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn,
ArrayRef<Expr *> Args) {
MemberExpr *ME = dyn_cast<MemberExpr>(Fn);
DeclarationName FuncName = FDecl->getDeclName();
- SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getLocStart();
+ SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getBeginLoc();
if (TypoCorrection Corrected = S.CorrectTypo(
DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName,
@@ -4816,12 +4927,14 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocStart(), diag::note_callee_decl)
- << FDecl;
+ Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
return true;
}
- Call->setNumArgs(Context, NumParams);
+ // We reserve space for the default arguments when we create
+ // the call expression, before calling ConvertArgumentsForCall.
+ assert((Call->getNumArgs() == NumParams) &&
+ "We should have reserved space for the default arguments before!");
}
// If too many are passed and not variadic, error on the extras and drop
@@ -4839,39 +4952,38 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
<< TC.getCorrectionRange());
} else if (NumParams == 1 && FDecl &&
FDecl->getParamDecl(0)->getDeclName())
- Diag(Args[NumParams]->getLocStart(),
+ Diag(Args[NumParams]->getBeginLoc(),
MinArgs == NumParams
? diag::err_typecheck_call_too_many_args_one
: diag::err_typecheck_call_too_many_args_at_most_one)
<< FnKind << FDecl->getParamDecl(0)
<< static_cast<unsigned>(Args.size()) << Fn->getSourceRange()
- << SourceRange(Args[NumParams]->getLocStart(),
- Args.back()->getLocEnd());
+ << SourceRange(Args[NumParams]->getBeginLoc(),
+ Args.back()->getEndLoc());
else
- Diag(Args[NumParams]->getLocStart(),
+ Diag(Args[NumParams]->getBeginLoc(),
MinArgs == NumParams
? diag::err_typecheck_call_too_many_args
: diag::err_typecheck_call_too_many_args_at_most)
<< FnKind << NumParams << static_cast<unsigned>(Args.size())
<< Fn->getSourceRange()
- << SourceRange(Args[NumParams]->getLocStart(),
- Args.back()->getLocEnd());
+ << SourceRange(Args[NumParams]->getBeginLoc(),
+ Args.back()->getEndLoc());
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocStart(), diag::note_callee_decl)
- << FDecl;
+ Diag(FDecl->getBeginLoc(), diag::note_callee_decl) << FDecl;
// This deletes the extra arguments.
- Call->setNumArgs(Context, NumParams);
+ Call->shrinkNumArgs(NumParams);
return true;
}
}
SmallVector<Expr *, 8> AllArgs;
VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn);
- Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl,
- Proto, 0, Args, AllArgs, CallType);
+ Invalid = GatherArgumentsForCall(Call->getBeginLoc(), FDecl, Proto, 0, Args,
+ AllArgs, CallType);
if (Invalid)
return true;
unsigned TotalNumArgs = AllArgs.size();
@@ -4899,8 +5011,7 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
if (ArgIx < Args.size()) {
Arg = Args[ArgIx++];
- if (RequireCompleteType(Arg->getLocStart(),
- ProtoArgType,
+ if (RequireCompleteType(Arg->getBeginLoc(), ProtoArgType,
diag::err_call_incomplete_argument, Arg))
return true;
@@ -5058,6 +5169,9 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -5153,10 +5267,13 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
continue;
}
+ QualType PointeeType = ParamType->getPointeeType();
+ if (PointeeType.getQualifiers().hasAddressSpace())
+ continue;
+
NeedsNewDecl = true;
LangAS AS = ArgType->getPointeeType().getAddressSpace();
- QualType PointeeType = ParamType->getPointeeType();
PointeeType = Context.getAddrSpaceQualType(PointeeType, AS);
OverloadParams.push_back(Context.getPointerType(PointeeType));
}
@@ -5205,7 +5322,7 @@ static void checkDirectCallValidity(Sema &S, const Expr *Fn,
return;
if (const EnableIfAttr *Attr = S.CheckEnableIf(Callee, ArgExprs, true)) {
- S.Diag(Fn->getLocStart(),
+ S.Diag(Fn->getBeginLoc(),
isa<CXXMethodDecl>(Callee)
? diag::err_ovl_no_viable_member_function_in_call
: diag::err_ovl_no_viable_function_in_call)
@@ -5317,14 +5434,14 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (isa<CXXPseudoDestructorExpr>(Fn)) {
if (!ArgExprs.empty()) {
// Pseudo-destructor calls should not have any arguments.
- Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
+ Diag(Fn->getBeginLoc(), diag::err_pseudo_dtor_call_with_args)
<< FixItHint::CreateRemoval(
- SourceRange(ArgExprs.front()->getLocStart(),
- ArgExprs.back()->getLocEnd()));
+ SourceRange(ArgExprs.front()->getBeginLoc(),
+ ArgExprs.back()->getEndLoc()));
}
- return new (Context)
- CallExpr(Context, Fn, None, Context.VoidTy, VK_RValue, RParenLoc);
+ return CallExpr::Create(Context, Fn, /*Args=*/{}, Context.VoidTy,
+ VK_RValue, RParenLoc);
}
if (Fn->getType() == Context.PseudoObjectTy) {
ExprResult result = CheckPlaceholderExpr(Fn);
@@ -5334,25 +5451,19 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
// Determine whether this is a dependent call inside a C++ template,
// in which case we won't do any semantic analysis now.
- bool Dependent = false;
- if (Fn->isTypeDependent())
- Dependent = true;
- else if (Expr::hasAnyTypeDependentArguments(ArgExprs))
- Dependent = true;
-
- if (Dependent) {
+ if (Fn->isTypeDependent() || Expr::hasAnyTypeDependentArguments(ArgExprs)) {
if (ExecConfig) {
- return new (Context) CUDAKernelCallExpr(
+ return CUDAKernelCallExpr::Create(
Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs,
Context.DependentTy, VK_RValue, RParenLoc);
} else {
- tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
+ tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
*this, dyn_cast<UnresolvedMemberExpr>(Fn->IgnoreParens()),
- Fn->getLocStart());
+ Fn->getBeginLoc());
- return new (Context) CallExpr(
- Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc);
+ return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
+ VK_RValue, RParenLoc);
}
}
@@ -5380,8 +5491,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
// We aren't supposed to apply this logic if there's an '&' involved.
if (!find.HasFormOfMemberPointer) {
if (Expr::hasAnyTypeDependentArguments(ArgExprs))
- return new (Context) CallExpr(
- Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc);
+ return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
+ VK_RValue, RParenLoc);
OverloadExpr *ovl = find.Expression;
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
return BuildOverloadedCallExpr(
@@ -5430,9 +5541,8 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) {
- if (CallingNDeclIndirectly &&
- !checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
- Fn->getLocStart()))
+ if (CallingNDeclIndirectly && !checkAddressOfFunctionIsAvailable(
+ FD, /*Complain=*/true, Fn->getBeginLoc()))
return ExprError();
if (getLangOpts().OpenCL && checkOpenCLDisabledDecl(*FD, *Fn))
@@ -5484,12 +5594,11 @@ ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
/// block-pointer type.
///
/// \param NDecl the declaration being called, if available
-ExprResult
-Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc,
- Expr *Config, bool IsExecConfig) {
+ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
+ SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc, Expr *Config,
+ bool IsExecConfig, ADLCallKind UsesADL) {
FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
@@ -5514,28 +5623,71 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// We special-case function promotion here because we only allow promoting
// builtin functions to function pointers in the callee of a call.
ExprResult Result;
+ QualType ResultTy;
if (BuiltinID &&
Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) {
- Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()),
- CK_BuiltinFnToFnPtr).get();
+ // Extract the return type from the (builtin) function pointer type.
+ // FIXME Several builtins still have setType in
+ // Sema::CheckBuiltinFunctionCall. One should review their definitions in
+ // Builtins.def to ensure they are correct before removing setType calls.
+ QualType FnPtrTy = Context.getPointerType(FDecl->getType());
+ Result = ImpCastExprToType(Fn, FnPtrTy, CK_BuiltinFnToFnPtr).get();
+ ResultTy = FDecl->getCallResultType();
} else {
Result = CallExprUnaryConversions(Fn);
+ ResultTy = Context.BoolTy;
}
if (Result.isInvalid())
return ExprError();
Fn = Result.get();
- // Make the call expr early, before semantic checks. This guarantees cleanup
- // of arguments and function on error.
+ // Check for a valid function type, but only if it is not a builtin which
+ // requires custom type checking. These will be handled by
+ // CheckBuiltinFunctionCall below just after creation of the call expression.
+ const FunctionType *FuncT = nullptr;
+ if (!BuiltinID || !Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) {
+ retry:
+ if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
+ // C99 6.5.2.2p1 - "The expression that denotes the called function shall
+ // have type pointer to function".
+ FuncT = PT->getPointeeType()->getAs<FunctionType>();
+ if (!FuncT)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ } else if (const BlockPointerType *BPT =
+ Fn->getType()->getAs<BlockPointerType>()) {
+ FuncT = BPT->getPointeeType()->castAs<FunctionType>();
+ } else {
+ // Handle calls to expressions of unknown-any type.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
+ if (rewrite.isInvalid()) return ExprError();
+ Fn = rewrite.get();
+ goto retry;
+ }
+
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ }
+ }
+
+ // Get the number of parameters in the function prototype, if any.
+ // We will allocate space for max(Args.size(), NumParams) arguments
+ // in the call expression.
+ const auto *Proto = dyn_cast_or_null<FunctionProtoType>(FuncT);
+ unsigned NumParams = Proto ? Proto->getNumParams() : 0;
+
CallExpr *TheCall;
- if (Config)
- TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
- cast<CallExpr>(Config), Args,
- Context.BoolTy, VK_RValue,
- RParenLoc);
- else
- TheCall = new (Context) CallExpr(Context, Fn, Args, Context.BoolTy,
- VK_RValue, RParenLoc);
+ if (Config) {
+ assert(UsesADL == ADLCallKind::NotADL &&
+ "CUDAKernelCallExpr should not use ADL");
+ TheCall =
+ CUDAKernelCallExpr::Create(Context, Fn, cast<CallExpr>(Config), Args,
+ ResultTy, VK_RValue, RParenLoc, NumParams);
+ } else {
+ TheCall = CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue,
+ RParenLoc, NumParams, UsesADL);
+ }
if (!getLangOpts().CPlusPlus) {
// C cannot always handle TypoExpr nodes in builtin calls and direct
@@ -5546,39 +5698,16 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (!Result.isUsable()) return ExprError();
TheCall = dyn_cast<CallExpr>(Result.get());
if (!TheCall) return Result;
- Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
+ // TheCall at this point has max(Args.size(), NumParams) arguments,
+ // with extra arguments nulled. We don't want to introduce nulled
+ // arguments in Args and so we only take the first Args.size() arguments.
+ Args = llvm::makeArrayRef(TheCall->getArgs(), Args.size());
}
- // Bail out early if calling a builtin with custom typechecking.
+ // Bail out early if calling a builtin with custom type checking.
if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
- retry:
- const FunctionType *FuncT;
- if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
- // C99 6.5.2.2p1 - "The expression that denotes the called function shall
- // have type pointer to function".
- FuncT = PT->getPointeeType()->getAs<FunctionType>();
- if (!FuncT)
- return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
- } else if (const BlockPointerType *BPT =
- Fn->getType()->getAs<BlockPointerType>()) {
- FuncT = BPT->getPointeeType()->castAs<FunctionType>();
- } else {
- // Handle calls to expressions of unknown-any type.
- if (Fn->getType() == Context.UnknownAnyTy) {
- ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
- if (rewrite.isInvalid()) return ExprError();
- Fn = rewrite.get();
- TheCall->setCallee(Fn);
- goto retry;
- }
-
- return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
- << Fn->getType() << Fn->getSourceRange());
- }
-
if (getLangOpts().CUDA) {
if (Config) {
// CUDA: Kernel calls must be to global functions
@@ -5599,7 +5728,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
}
// Check for a valid return type
- if (CheckCallReturnType(FuncT->getReturnType(), Fn->getLocStart(), TheCall,
+ if (CheckCallReturnType(FuncT->getReturnType(), Fn->getBeginLoc(), TheCall,
FDecl))
return ExprError();
@@ -5607,7 +5736,6 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setType(FuncT->getCallResultType(Context));
TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
- const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT);
if (Proto) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc,
IsExecConfig))
@@ -5655,8 +5783,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
Arg = ArgE.getAs<Expr>();
}
- if (RequireCompleteType(Arg->getLocStart(),
- Arg->getType(),
+ if (RequireCompleteType(Arg->getBeginLoc(), Arg->getType(),
diag::err_call_incomplete_argument, Arg))
return ExprError();
@@ -5739,13 +5866,6 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
LiteralExpr = Result.get();
bool isFileScope = !CurContext->isFunctionOrMethod();
- if (isFileScope &&
- !LiteralExpr->isTypeDependent() &&
- !LiteralExpr->isValueDependent() &&
- !literalType->isDependentType()) { // 6.5.2.5p3
- if (CheckForConstantInitializer(LiteralExpr, literalType))
- return ExprError();
- }
// In C, compound literals are l-values for some reason.
// For GCC compatibility, in C++, file-scope array compound literals with
@@ -5770,9 +5890,32 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
? VK_RValue
: VK_LValue;
- return MaybeBindToTemporary(
- new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
- VK, LiteralExpr, isFileScope));
+ if (isFileScope)
+ if (auto ILE = dyn_cast<InitListExpr>(LiteralExpr))
+ for (unsigned i = 0, j = ILE->getNumInits(); i != j; i++) {
+ Expr *Init = ILE->getInit(i);
+ ILE->setInit(i, ConstantExpr::Create(Context, Init));
+ }
+
+ Expr *E = new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, LiteralExpr, isFileScope);
+ if (isFileScope) {
+ if (!LiteralExpr->isTypeDependent() &&
+ !LiteralExpr->isValueDependent() &&
+ !literalType->isDependentType()) // C99 6.5.2.5p3
+ if (CheckForConstantInitializer(LiteralExpr, literalType))
+ return ExprError();
+ } else if (literalType.getAddressSpace() != LangAS::opencl_private &&
+ literalType.getAddressSpace() != LangAS::Default) {
+ // Embedded-C extensions to C99 6.5.2.5:
+ // "If the compound literal occurs inside the body of a function, the
+ // type name shall not be qualified by an address-space qualifier."
+ Diag(LParenLoc, diag::err_compound_literal_with_address_space)
+ << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd());
+ return ExprError();
+ }
+
+ return MaybeBindToTemporary(E);
}
ExprResult
@@ -5854,6 +5997,8 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
LangAS DestAS = DestTy->getPointeeType().getAddressSpace();
if (SrcAS != DestAS)
return CK_AddressSpaceConversion;
+ if (Context.hasCvrSimilarType(SrcTy, DestTy))
+ return CK_NoOp;
return CK_BitCast;
}
case Type::STK_BlockPointer:
@@ -5874,10 +6019,33 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
case Type::STK_FloatingComplex:
case Type::STK_IntegralComplex:
case Type::STK_MemberPointer:
+ case Type::STK_FixedPoint:
llvm_unreachable("illegal cast from pointer");
}
llvm_unreachable("Should have returned before this");
+ case Type::STK_FixedPoint:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FixedPoint:
+ return CK_FixedPointCast;
+ case Type::STK_Bool:
+ return CK_FixedPointToBoolean;
+ case Type::STK_Integral:
+ case Type::STK_Floating:
+ case Type::STK_IntegralComplex:
+ case Type::STK_FloatingComplex:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << DestTy;
+ return CK_IntegralCast;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ case Type::STK_MemberPointer:
+ llvm_unreachable("illegal cast to pointer type");
+ }
+ llvm_unreachable("Should have returned before this");
+
case Type::STK_Bool: // casting from bool is like casting from an integer
case Type::STK_Integral:
switch (DestTy->getScalarTypeKind()) {
@@ -5906,6 +6074,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
return CK_FloatingRealToComplex;
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5933,6 +6106,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid float->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5962,6 +6140,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid complex float->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
@@ -5991,6 +6174,11 @@ CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
llvm_unreachable("valid complex int->pointer cast?");
case Type::STK_MemberPointer:
llvm_unreachable("member pointer type in C");
+ case Type::STK_FixedPoint:
+ Diag(Src.get()->getExprLoc(),
+ diag::err_unimplemented_conversion_with_fixed_point_type)
+ << SrcTy;
+ return CK_IntegralCast;
}
llvm_unreachable("Should have returned before this");
}
@@ -6323,8 +6511,7 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
ExprResult Sema::ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val) {
- Expr *expr = new (Context) ParenListExpr(Context, L, Val, R);
- return expr;
+ return ParenListExpr::Create(Context, L, Val, R);
}
/// Emit a specialized diagnostic when one expression is a null pointer
@@ -6394,11 +6581,11 @@ static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
Expr *RHSExpr = RHS.get();
if (!LHSExpr->getType()->isVoidType())
- S.Diag(RHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
- << RHSExpr->getSourceRange();
+ S.Diag(RHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void)
+ << RHSExpr->getSourceRange();
if (!RHSExpr->getType()->isVoidType())
- S.Diag(LHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
- << LHSExpr->getSourceRange();
+ S.Diag(LHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void)
+ << LHSExpr->getSourceRange();
LHS = S.ImpCastExprToType(LHS.get(), S.Context.VoidTy, CK_ToVoid);
RHS = S.ImpCastExprToType(RHS.get(), S.Context.VoidTy, CK_ToVoid);
return S.Context.VoidTy;
@@ -6458,20 +6645,18 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
LangAS ResultAddrSpace = LangAS::Default;
LangAS LAddrSpace = lhQual.getAddressSpace();
LangAS RAddrSpace = rhQual.getAddressSpace();
- if (S.getLangOpts().OpenCL) {
- // OpenCL v1.1 s6.5 - Conversion between pointers to distinct address
- // spaces is disallowed.
- if (lhQual.isAddressSpaceSupersetOf(rhQual))
- ResultAddrSpace = LAddrSpace;
- else if (rhQual.isAddressSpaceSupersetOf(lhQual))
- ResultAddrSpace = RAddrSpace;
- else {
- S.Diag(Loc,
- diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
- << LHSTy << RHSTy << 2 << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
- return QualType();
- }
+
+ // OpenCL v1.1 s6.5 - Conversion between pointers to distinct address
+ // spaces is disallowed.
+ if (lhQual.isAddressSpaceSupersetOf(rhQual))
+ ResultAddrSpace = LAddrSpace;
+ else if (rhQual.isAddressSpaceSupersetOf(lhQual))
+ ResultAddrSpace = RAddrSpace;
+ else {
+ S.Diag(Loc, diag::err_typecheck_op_on_nonoverlapping_address_space_pointers)
+ << LHSTy << RHSTy << 2 << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
}
unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers();
@@ -6489,16 +6674,12 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
// Thus for conditional operator we merge CVR and address space unqualified
// pointees and if there is a composite type we return a pointer to it with
// merged qualifiers.
- if (S.getLangOpts().OpenCL) {
- LHSCastKind = LAddrSpace == ResultAddrSpace
- ? CK_BitCast
- : CK_AddressSpaceConversion;
- RHSCastKind = RAddrSpace == ResultAddrSpace
- ? CK_BitCast
- : CK_AddressSpaceConversion;
- lhQual.removeAddressSpace();
- rhQual.removeAddressSpace();
- }
+ LHSCastKind =
+ LAddrSpace == ResultAddrSpace ? CK_BitCast : CK_AddressSpaceConversion;
+ RHSCastKind =
+ RAddrSpace == ResultAddrSpace ? CK_BitCast : CK_AddressSpaceConversion;
+ lhQual.removeAddressSpace();
+ rhQual.removeAddressSpace();
lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual);
rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual);
@@ -6514,6 +6695,7 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
S.Context.getAddrSpaceQualType(S.Context.VoidTy, ResultAddrSpace));
LHS = S.ImpCastExprToType(LHS.get(), incompatTy, LHSCastKind);
RHS = S.ImpCastExprToType(RHS.get(), incompatTy, RHSCastKind);
+
// FIXME: For OpenCL the warning emission and cast to void* leaves a room
// for casts between types with incompatible address space qualifiers.
// For the following code the compiler produces casts between global and
@@ -6524,6 +6706,7 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
S.Diag(Loc, diag::ext_typecheck_cond_incompatible_pointers)
<< LHSTy << RHSTy << LHS.get()->getSourceRange()
<< RHS.get()->getSourceRange();
+
return incompatTy;
}
@@ -7219,14 +7402,15 @@ static void DiagnoseConditionalPrecedence(Sema &Self,
<< Condition->getSourceRange()
<< BinaryOperator::getOpcodeStr(CondOpcode);
- SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::note_precedence_silence)
- << BinaryOperator::getOpcodeStr(CondOpcode),
- SourceRange(Condition->getLocStart(), Condition->getLocEnd()));
+ SuggestParentheses(
+ Self, OpLoc,
+ Self.PDiag(diag::note_precedence_silence)
+ << BinaryOperator::getOpcodeStr(CondOpcode),
+ SourceRange(Condition->getBeginLoc(), Condition->getEndLoc()));
SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::note_precedence_conditional_first),
- SourceRange(CondRHS->getLocStart(), RHSExpr->getLocEnd()));
+ Self.PDiag(diag::note_precedence_conditional_first),
+ SourceRange(CondRHS->getBeginLoc(), RHSExpr->getEndLoc()));
}
/// Compute the nullability of a conditional expression.
@@ -7757,7 +7941,12 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
if (isa<PointerType>(RHSType)) {
LangAS AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace();
LangAS AddrSpaceR = RHSType->getPointeeType().getAddressSpace();
- Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
+ if (AddrSpaceL != AddrSpaceR)
+ Kind = CK_AddressSpaceConversion;
+ else if (Context.hasCvrSimilarType(RHSType, LHSType))
+ Kind = CK_NoOp;
+ else
+ Kind = CK_BitCast;
return checkPointerTypesForAssignment(*this, LHSType, RHSType);
}
@@ -7825,7 +8014,7 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
// id -> T^
- if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) {
+ if (getLangOpts().ObjC && RHSType->isObjCIdType()) {
Kind = CK_AnyPointerToBlockPointerCast;
return Compatible;
}
@@ -8029,6 +8218,17 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
ExprResult LocalRHS = CallerRHS;
ExprResult &RHS = ConvertRHS ? CallerRHS : LocalRHS;
+ if (const auto *LHSPtrType = LHSType->getAs<PointerType>()) {
+ if (const auto *RHSPtrType = RHS.get()->getType()->getAs<PointerType>()) {
+ if (RHSPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
+ !LHSPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ Diag(RHS.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << RHS.get()->getSourceRange();
+ }
+ }
+ }
+
if (getLangOpts().CPlusPlus) {
if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
// C++ 5.17p3: If the left operand is not of class type, the
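
Editor's note: a hedged example of the new assignment check (warn_noderef_to_dereferenceable_pointer) added above; the snippet itself is assumed, not from the patch.

    #define NODEREF __attribute__((noderef))

    void demo(int NODEREF *src) {
      int NODEREF *same = src;  // OK: the noderef qualifier is preserved
      int *plain = src;         // warned: the noderef qualifier is silently dropped
      (void)same; (void)plain;
    }
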
@@ -8092,6 +8292,13 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
return Compatible;
}
+ // OpenCL queue_t type assignment.
+ if (LHSType->isQueueT() && RHS.get()->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNull)) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return Compatible;
+ }
+
// This check seems unnatural, however it is necessary to ensure the proper
// conversion of functions/arrays. If the conversion were done for all
// DeclExpr's (created by ActOnIdExpression), it would mess up the unary
@@ -8104,16 +8311,6 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (RHS.isInvalid())
return Incompatible;
}
-
- Expr *PRE = RHS.get()->IgnoreParenCasts();
- if (Diagnose && isa<ObjCProtocolExpr>(PRE)) {
- ObjCProtocolDecl *PDecl = cast<ObjCProtocolExpr>(PRE)->getProtocol();
- if (PDecl && !PDecl->hasDefinition()) {
- Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl;
- Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
- }
- }
-
CastKind Kind;
Sema::AssignConvertType result =
CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS);
@@ -8137,8 +8334,8 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (!Diagnose)
return Incompatible;
}
- if (getLangOpts().ObjC1 &&
- (CheckObjCBridgeRelatedConversions(E->getLocStart(), LHSType,
+ if (getLangOpts().ObjC &&
+ (CheckObjCBridgeRelatedConversions(E->getBeginLoc(), LHSType,
E->getType(), E, Diagnose) ||
ConversionToObjCStringLiteralCheck(LHSType, E, Diagnose))) {
if (!Diagnose)
@@ -8152,6 +8349,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
if (ConvertRHS)
RHS = ImpCastExprToType(E, Ty, Kind);
}
+
return result;
}
@@ -8314,8 +8512,8 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
// Reject cases where the value of the Int is unknown as that would
// possibly cause truncation, but accept cases where the scalar can be
// demoted without loss of precision.
- llvm::APSInt Result;
- bool CstInt = Int->get()->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context);
int Order = S.Context.getIntegerTypeOrder(OtherIntTy, IntTy);
bool IntSigned = IntTy->hasSignedIntegerRepresentation();
bool OtherIntSigned = OtherIntTy->hasSignedIntegerRepresentation();
@@ -8323,6 +8521,7 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
if (CstInt) {
// If the scalar is constant and is of a higher order and has more active
// bits that the vector element type, reject it.
+ llvm::APSInt Result = EVResult.Val.getInt();
unsigned NumBits = IntSigned
? (Result.isNegative() ? Result.getMinSignedBits()
: Result.getActiveBits())
@@ -8350,8 +8549,9 @@ static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int,
// Determine if the integer constant can be expressed as a floating point
// number of the appropriate type.
- llvm::APSInt Result;
- bool CstInt = Int->get()->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ bool CstInt = Int->get()->EvaluateAsInt(EVResult, S.Context);
+
uint64_t Bits = 0;
if (CstInt) {
// Reject constants that would be truncated if they were converted to
@@ -8359,6 +8559,7 @@ static bool canConvertIntTyToFloatTy(Sema &S, ExprResult *Int,
// FIXME: Ideally the conversion to an APFloat and from an APFloat
// could be avoided if there was a convertFromAPInt method
// which could signal back if implicit truncation occurred.
+ llvm::APSInt Result = EVResult.Val.getInt();
llvm::APFloat Float(S.Context.getFloatTypeSemantics(FloatTy));
Float.convertFromAPInt(Result, IntTy->hasSignedIntegerRepresentation(),
llvm::APFloat::rmTowardZero);
@@ -8668,13 +8869,40 @@ static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
}
+static void DiagnoseDivisionSizeofPointer(Sema &S, Expr *LHS, Expr *RHS,
+ SourceLocation Loc) {
+ const auto *LUE = dyn_cast<UnaryExprOrTypeTraitExpr>(LHS);
+ const auto *RUE = dyn_cast<UnaryExprOrTypeTraitExpr>(RHS);
+ if (!LUE || !RUE)
+ return;
+ if (LUE->getKind() != UETT_SizeOf || LUE->isArgumentType() ||
+ RUE->getKind() != UETT_SizeOf)
+ return;
+
+ QualType LHSTy = LUE->getArgumentExpr()->IgnoreParens()->getType();
+ QualType RHSTy;
+
+ if (RUE->isArgumentType())
+ RHSTy = RUE->getArgumentType();
+ else
+ RHSTy = RUE->getArgumentExpr()->IgnoreParens()->getType();
+
+ if (!LHSTy->isPointerType() || RHSTy->isPointerType())
+ return;
+ if (LHSTy->getPointeeType() != RHSTy)
+ return;
+
+ S.Diag(Loc, diag::warn_division_sizeof_ptr) << LHS << LHS->getSourceRange();
+}
+
static void DiagnoseBadDivideOrRemainderValues(Sema& S, ExprResult &LHS,
ExprResult &RHS,
SourceLocation Loc, bool IsDiv) {
// Check for division/remainder by zero.
- llvm::APSInt RHSValue;
+ Expr::EvalResult RHSValue;
if (!RHS.get()->isValueDependent() &&
- RHS.get()->EvaluateAsInt(RHSValue, S.Context) && RHSValue == 0)
+ RHS.get()->EvaluateAsInt(RHSValue, S.Context) &&
+ RHSValue.Val.getInt() == 0)
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_remainder_division_by_zero)
<< IsDiv << RHS.get()->getSourceRange());
@@ -8698,8 +8926,10 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
if (compType.isNull() || !compType->isArithmeticType())
return InvalidOperands(Loc, LHS, RHS);
- if (IsDiv)
+ if (IsDiv) {
DiagnoseBadDivideOrRemainderValues(*this, LHS, RHS, Loc, IsDiv);
+ DiagnoseDivisionSizeofPointer(*this, LHS.get(), RHS.get(), Loc);
+ }
return compType;
}
@@ -8914,24 +9144,15 @@ static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
if (!IsStringPlusInt || IndexExpr->isValueDependent())
return;
- llvm::APSInt index;
- if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) {
- unsigned StrLenWithNull = StrExpr->getLength() + 1;
- if (index.isNonNegative() &&
- index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull),
- index.isUnsigned()))
- return;
- }
-
- SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ SourceRange DiagRange(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc());
Self.Diag(OpLoc, diag::warn_string_plus_int)
<< DiagRange << IndexExpr->IgnoreImpCasts()->getType();
// Only print a fixit for "str" + int, not for int + "str".
if (IndexExpr == RHSExpr) {
- SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getEndLoc());
Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
- << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateInsertion(LHSExpr->getBeginLoc(), "&")
<< FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
<< FixItHint::CreateInsertion(EndLoc, "]");
} else
@@ -8964,7 +9185,7 @@ static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
return;
ASTContext &Ctx = Self.getASTContext();
- SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ SourceRange DiagRange(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc());
const QualType CharType = CharExpr->getType();
if (!CharType->isAnyCharacterType() &&
@@ -8979,9 +9200,9 @@ static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc,
// Only print a fixit for str + char, not for char + str.
if (isa<CharacterLiteral>(RHSExpr->IgnoreImpCasts())) {
- SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getLocEnd());
+ SourceLocation EndLoc = Self.getLocForEndOfToken(RHSExpr->getEndLoc());
Self.Diag(OpLoc, diag::note_string_plus_scalar_silence)
- << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateInsertion(LHSExpr->getBeginLoc(), "&")
<< FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
<< FixItHint::CreateInsertion(EndLoc, "]");
} else {
@@ -9059,10 +9280,11 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (PExp->IgnoreParenCasts()->isNullPointerConstant(
Context, Expr::NPC_ValueDependentIsNotNull)) {
// In C++ adding zero to a null pointer is defined.
- llvm::APSInt KnownVal;
+ Expr::EvalResult KnownVal;
if (!getLangOpts().CPlusPlus ||
(!IExp->isValueDependent() &&
- (!IExp->EvaluateAsInt(KnownVal, Context) || KnownVal != 0))) {
+ (!IExp->EvaluateAsInt(KnownVal, Context) ||
+ KnownVal.Val.getInt() != 0))) {
// Check the conditions to see if this is the 'p = nullptr + n' idiom.
bool IsGNUIdiom = BinaryOperator::isNullPointerArithmeticExtension(
Context, BO_Add, PExp, IExp);
@@ -9137,10 +9359,11 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->IgnoreParenCasts()->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNotNull)) {
// In C++ adding zero to a null pointer is defined.
- llvm::APSInt KnownVal;
+ Expr::EvalResult KnownVal;
if (!getLangOpts().CPlusPlus ||
(!RHS.get()->isValueDependent() &&
- (!RHS.get()->EvaluateAsInt(KnownVal, Context) || KnownVal != 0))) {
+ (!RHS.get()->EvaluateAsInt(KnownVal, Context) ||
+ KnownVal.Val.getInt() != 0))) {
diagnoseArithmeticOnNullPointer(*this, Loc, LHS.get(), false);
}
}
@@ -9216,11 +9439,12 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
if (S.getLangOpts().OpenCL)
return;
- llvm::APSInt Right;
// Check right/shifter operand
+ Expr::EvalResult RHSResult;
if (RHS.get()->isValueDependent() ||
- !RHS.get()->EvaluateAsInt(Right, S.Context))
+ !RHS.get()->EvaluateAsInt(RHSResult, S.Context))
return;
+ llvm::APSInt Right = RHSResult.Val.getInt();
if (Right.isNegative()) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
@@ -9243,11 +9467,12 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned
// integers have defined behavior modulo one more than the maximum value
// representable in the result type, so never warn for those.
- llvm::APSInt Left;
+ Expr::EvalResult LHSResult;
if (LHS.get()->isValueDependent() ||
LHSType->hasUnsignedIntegerRepresentation() ||
- !LHS.get()->EvaluateAsInt(Left, S.Context))
+ !LHS.get()->EvaluateAsInt(LHSResult, S.Context))
return;
+ llvm::APSInt Left = LHSResult.Val.getInt();
// If LHS does not have a signed type and non-negative value
// then, the behavior is undefined. Warn about it.
@@ -9653,8 +9878,8 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
if (BinaryOperator::isEqualityOp(Opc) &&
hasIsEqualMethod(S, LHS.get(), RHS.get())) {
- SourceLocation Start = LHS.get()->getLocStart();
- SourceLocation End = S.getLocForEndOfToken(RHS.get()->getLocEnd());
+ SourceLocation Start = LHS.get()->getBeginLoc();
+ SourceLocation End = S.getLocForEndOfToken(RHS.get()->getEndLoc());
CharSourceRange OpRange =
CharSourceRange::getCharRange(Loc, S.getLocForEndOfToken(Loc));
@@ -9686,8 +9911,8 @@ static void diagnoseLogicalNotOnLHSofCheck(Sema &S, ExprResult &LHS,
<< Loc << IsBitwiseOp;
// First note suggest !(x < y)
- SourceLocation FirstOpen = SubExpr->getLocStart();
- SourceLocation FirstClose = RHS.get()->getLocEnd();
+ SourceLocation FirstOpen = SubExpr->getBeginLoc();
+ SourceLocation FirstClose = RHS.get()->getEndLoc();
FirstClose = S.getLocForEndOfToken(FirstClose);
if (FirstClose.isInvalid())
FirstOpen = SourceLocation();
@@ -9697,8 +9922,8 @@ static void diagnoseLogicalNotOnLHSofCheck(Sema &S, ExprResult &LHS,
<< FixItHint::CreateInsertion(FirstClose, ")");
// Second note suggests (!x) < y
- SourceLocation SecondOpen = LHS.get()->getLocStart();
- SourceLocation SecondClose = LHS.get()->getLocEnd();
+ SourceLocation SecondOpen = LHS.get()->getBeginLoc();
+ SourceLocation SecondClose = LHS.get()->getEndLoc();
SecondClose = S.getLocForEndOfToken(SecondClose);
if (SecondClose.isInvalid())
SecondOpen = SourceLocation();
@@ -9734,7 +9959,7 @@ static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
QualType RHSType = RHS->getType();
if (LHSType->hasFloatingRepresentation() ||
(LHSType->isBlockPointerType() && !BinaryOperator::isEqualityOp(Opc)) ||
- LHS->getLocStart().isMacroID() || RHS->getLocStart().isMacroID() ||
+ LHS->getBeginLoc().isMacroID() || RHS->getBeginLoc().isMacroID() ||
S.inTemplateInstantiation())
return;
@@ -9888,7 +10113,7 @@ static bool checkThreeWayNarrowingConversion(Sema &S, QualType ToType, Expr *E,
case NK_Constant_Narrowing:
// Implicit conversion to a narrower type, and the value is not a constant
// expression.
- S.Diag(E->getLocStart(), diag::err_spaceship_argument_narrowing)
+ S.Diag(E->getBeginLoc(), diag::err_spaceship_argument_narrowing)
<< /*Constant*/ 1
<< PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << ToType;
return true;
@@ -9897,7 +10122,7 @@ static bool checkThreeWayNarrowingConversion(Sema &S, QualType ToType, Expr *E,
// Implicit conversion to a narrower type, and the value is not a constant
// expression.
case NK_Type_Narrowing:
- S.Diag(E->getLocStart(), diag::err_spaceship_argument_narrowing)
+ S.Diag(E->getBeginLoc(), diag::err_spaceship_argument_narrowing)
<< /*Constant*/ 0 << FromType << ToType;
// TODO: It's not a constant expression, but what if the user intended it
// to be? Can we produce notes to help them figure out why it isn't?
@@ -9972,9 +10197,9 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
assert(Type->isArithmeticType() || Type->isEnumeralType());
bool HasNarrowing = checkThreeWayNarrowingConversion(
- S, Type, LHS.get(), LHSType, LHS.get()->getLocStart());
- HasNarrowing |= checkThreeWayNarrowingConversion(
- S, Type, RHS.get(), RHSType, RHS.get()->getLocStart());
+ S, Type, LHS.get(), LHSType, LHS.get()->getBeginLoc());
+ HasNarrowing |= checkThreeWayNarrowingConversion(S, Type, RHS.get(), RHSType,
+ RHS.get()->getBeginLoc());
if (HasNarrowing)
return QualType();
@@ -10439,6 +10664,14 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
}
if (getLangOpts().OpenCLVersion >= 200) {
+ if (LHSType->isClkEventT() && RHSType->isClkEventT()) {
+ return computeResultTy();
+ }
+
+ if (LHSType->isQueueT() && RHSType->isQueueT()) {
+ return computeResultTy();
+ }
+
if (LHSIsNull && RHSType->isQueueT()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
return computeResultTy();
@@ -10609,8 +10842,9 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
// Parens on the RHS are ignored.
- llvm::APSInt Result;
- if (RHS.get()->EvaluateAsInt(Result, Context))
+ Expr::EvalResult EVResult;
+ if (RHS.get()->EvaluateAsInt(EVResult, Context)) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
!RHS.get()->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
@@ -10627,9 +10861,10 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
<< FixItHint::CreateRemoval(
- SourceRange(getLocForEndOfToken(LHS.get()->getLocEnd()),
- RHS.get()->getLocEnd()));
+ SourceRange(getLocForEndOfToken(LHS.get()->getEndLoc()),
+ RHS.get()->getEndLoc()));
}
+ }
}
if (!Context.getLangOpts().CPlusPlus) {
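// --- Illustrative sketch (not part of the patch) ---------------------------
// The hunk above switches to the Expr::EvalResult-based EvaluateAsInt
// interface. A minimal, hedged example of the new calling convention; the
// helper name is invented and the snippet assumes the Clang AST headers.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/APSInt.h"

static bool foldsToZeroOrOne(const clang::Expr *E, const clang::ASTContext &Ctx) {
  clang::Expr::EvalResult EVResult;
  if (!E->EvaluateAsInt(EVResult, Ctx))
    return false;                             // not an integral constant
  llvm::APSInt Value = EVResult.Val.getInt(); // the APSInt now lives in the APValue
  return Value == 0 || Value == 1;            // the values the warning tolerates
}
// ---------------------------------------------------------------------------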
@@ -10880,30 +11115,38 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
const RecordType *Ty,
SourceLocation Loc, SourceRange Range,
OriginalExprKind OEK,
- bool &DiagnosticEmitted,
- bool IsNested = false) {
+ bool &DiagnosticEmitted) {
+ std::vector<const RecordType *> RecordTypeList;
+ RecordTypeList.push_back(Ty);
+ unsigned NextToCheckIndex = 0;
// We walk the record hierarchy breadth-first to ensure that we print
// diagnostics in field nesting order.
- // First, check every field for constness.
- for (const FieldDecl *Field : Ty->getDecl()->fields()) {
- if (Field->getType().isConstQualified()) {
- if (!DiagnosticEmitted) {
- S.Diag(Loc, diag::err_typecheck_assign_const)
- << Range << NestedConstMember << OEK << VD
- << IsNested << Field;
- DiagnosticEmitted = true;
+ while (RecordTypeList.size() > NextToCheckIndex) {
+ bool IsNested = NextToCheckIndex > 0;
+ for (const FieldDecl *Field :
+ RecordTypeList[NextToCheckIndex]->getDecl()->fields()) {
+ // First, check every field for constness.
+ QualType FieldTy = Field->getType();
+ if (FieldTy.isConstQualified()) {
+ if (!DiagnosticEmitted) {
+ S.Diag(Loc, diag::err_typecheck_assign_const)
+ << Range << NestedConstMember << OEK << VD
+ << IsNested << Field;
+ DiagnosticEmitted = true;
+ }
+ S.Diag(Field->getLocation(), diag::note_typecheck_assign_const)
+ << NestedConstMember << IsNested << Field
+ << FieldTy << Field->getSourceRange();
+ }
+
+ // Then we append it to the list to check next in order.
+ FieldTy = FieldTy.getCanonicalType();
+ if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
+ if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ RecordTypeList.push_back(FieldRecTy);
}
- S.Diag(Field->getLocation(), diag::note_typecheck_assign_const)
- << NestedConstMember << IsNested << Field
- << Field->getType() << Field->getSourceRange();
}
- }
- // Then, recurse.
- for (const FieldDecl *Field : Ty->getDecl()->fields()) {
- QualType FTy = Field->getType();
- if (const RecordType *FieldRecTy = FTy->getAs<RecordType>())
- DiagnoseRecursiveConstFields(S, VD, FieldRecTy, Loc, Range,
- OEK, DiagnosticEmitted, true);
+ ++NextToCheckIndex;
}
}
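// --- Illustrative sketch (not part of the patch) ---------------------------
// Plain-C++ rendering of the worklist scheme the rewritten
// DiagnoseRecursiveConstFields uses above: one vector serves as both the
// breadth-first queue and the visited set, and an index walks it so nested
// records are reported in field-nesting order. The types here are invented.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Record {
  const char *Name;
  std::vector<const Record *> Fields; // record-typed fields only
};

static void visitBreadthFirst(const Record *Root) {
  std::vector<const Record *> Worklist{Root};
  for (std::size_t Next = 0; Next < Worklist.size(); ++Next) {
    const Record *R = Worklist[Next];
    std::printf("%s%s\n", R->Name, Next > 0 ? " (nested)" : "");
    for (const Record *Field : R->Fields)
      if (std::find(Worklist.begin(), Worklist.end(), Field) == Worklist.end())
        Worklist.push_back(Field); // enqueue each record type only once
  }
}

int main() {
  Record C{"C", {}}, B{"B", {&C}}, A{"A", {&B, &C}};
  visitBreadthFirst(&A);
}
// ---------------------------------------------------------------------------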
@@ -10971,17 +11214,23 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
if (var->isARCPseudoStrong() &&
(!var->getTypeSourceInfo() ||
!var->getTypeSourceInfo()->getType().isConstQualified())) {
- // There are two pseudo-strong cases:
+ // There are three pseudo-strong cases:
// - self
ObjCMethodDecl *method = S.getCurMethodDecl();
- if (method && var == method->getSelfDecl())
+ if (method && var == method->getSelfDecl()) {
DiagID = method->isClassMethod()
? diag::err_typecheck_arc_assign_self_class_method
: diag::err_typecheck_arc_assign_self;
+ // - Objective-C externally_retained attribute.
+ } else if (var->hasAttr<ObjCExternallyRetainedAttr>() ||
+ isa<ParmVarDecl>(var)) {
+ DiagID = diag::err_typecheck_arc_assign_externally_retained;
+
// - fast enumeration variables
- else
+ } else {
DiagID = diag::err_typecheck_arr_assign_enumeration;
+ }
SourceRange Assign;
if (Loc != OrigLoc)
@@ -11150,15 +11399,14 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck))
RHSCheck = ICE->getSubExpr();
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) {
- if ((UO->getOpcode() == UO_Plus ||
- UO->getOpcode() == UO_Minus) &&
+ if ((UO->getOpcode() == UO_Plus || UO->getOpcode() == UO_Minus) &&
Loc.isFileID() && UO->getOperatorLoc().isFileID() &&
// Only if the two operators are exactly adjacent.
Loc.getLocWithOffset(1) == UO->getOperatorLoc() &&
// And there is a space or other character before the subexpr of the
// unary +/-. We don't want to warn on "x=-1".
- Loc.getLocWithOffset(2) != UO->getSubExpr()->getLocStart() &&
- UO->getSubExpr()->getLocStart().isFileID()) {
+ Loc.getLocWithOffset(2) != UO->getSubExpr()->getBeginLoc() &&
+ UO->getSubExpr()->getBeginLoc().isFileID()) {
Diag(Loc, diag::warn_not_compound_assign)
<< (UO->getOpcode() == UO_Plus ? "+" : "-")
<< SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc());
@@ -11188,7 +11436,7 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// For ObjCWeak only, we do not warn if the assign is to a non-weak
// variable, which will be valid for the current autorelease scope.
if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
- RHS.get()->getLocStart()))
+ RHS.get()->getBeginLoc()))
getCurFunction()->markSafeWeakUse(RHS.get());
} else if (getLangOpts().ObjCAutoRefCount || getLangOpts().ObjCWeak) {
@@ -11225,6 +11473,12 @@ static bool IgnoreCommaOperand(const Expr *E) {
if (CE->getCastKind() == CK_ToVoid) {
return true;
}
+
+ // static_cast<void> on a dependent type will not show up as CK_ToVoid.
+ if (CE->getCastKind() == CK_Dependent && E->getType()->isVoidType() &&
+ CE->getSubExpr()->getType()->isDependentType()) {
+ return true;
+ }
}
return false;
@@ -11248,8 +11502,11 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
// The whitelisted locations are the initialization and increment portions
// of a for loop. The additional checks are on the condition of
// if statements, do/while loops, and for loops.
+ // Differences in scope flags for C89 mode require the extra logic.
const unsigned ForIncrementFlags =
- Scope::ControlScope | Scope::ContinueScope | Scope::BreakScope;
+ getLangOpts().C99 || getLangOpts().CPlusPlus
+ ? Scope::ControlScope | Scope::ContinueScope | Scope::BreakScope
+ : Scope::ContinueScope | Scope::BreakScope;
const unsigned ForInitFlags = Scope::ControlScope | Scope::DeclScope;
const unsigned ScopeFlags = getCurScope()->getFlags();
if ((ScopeFlags & ForIncrementFlags) == ForIncrementFlags ||
@@ -11269,12 +11526,12 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
return;
Diag(Loc, diag::warn_comma_operator);
- Diag(LHS->getLocStart(), diag::note_cast_to_void)
+ Diag(LHS->getBeginLoc(), diag::note_cast_to_void)
<< LHS->getSourceRange()
- << FixItHint::CreateInsertion(LHS->getLocStart(),
+ << FixItHint::CreateInsertion(LHS->getBeginLoc(),
LangOpts.CPlusPlus ? "static_cast<void>("
: "(void)(")
- << FixItHint::CreateInsertion(PP.getLocForEndOfToken(LHS->getLocEnd()),
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(LHS->getEndLoc()),
")");
}
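// --- Illustrative sketch (not part of the patch) ---------------------------
// The scope test above requires *all* of the for-increment flag bits to be
// present, not merely some overlap. A tiny standalone demonstration with
// invented flag values (these are not Clang's real Scope flags).
#include <cstdio>

enum : unsigned {
  ControlScope  = 1u << 0,
  ContinueScope = 1u << 1,
  BreakScope    = 1u << 2,
  DeclScope     = 1u << 3,
};

static bool hasAll(unsigned Flags, unsigned Required) {
  return (Flags & Required) == Required; // subset test, not just intersection
}

int main() {
  const unsigned ForIncrement = ControlScope | ContinueScope | BreakScope;
  std::printf("%d\n", hasAll(ForIncrement | DeclScope, ForIncrement));  // 1
  std::printf("%d\n", hasAll(ContinueScope | BreakScope, ForIncrement)); // 0
}
// ---------------------------------------------------------------------------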
@@ -11551,7 +11808,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (auto *FD = dyn_cast_or_null<FunctionDecl>(dcl))
if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
- op->getLocStart()))
+ op->getBeginLoc()))
return QualType();
Expr::LValueClassification lval = op->ClassifyLValue(Context);
@@ -11877,7 +12134,7 @@ static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
/// is usually indicative of introspection within the Objective-C pointer.
static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R,
SourceLocation OpLoc) {
- if (!S.getLangOpts().ObjC1)
+ if (!S.getLangOpts().ObjC)
return;
const Expr *ObjCPointerExpr = nullptr, *OtherExpr = nullptr;
@@ -12006,7 +12263,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
// The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning
// of x = {} is x = T().
InitializationKind Kind = InitializationKind::CreateDirectList(
- RHSExpr->getLocStart(), RHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ RHSExpr->getBeginLoc(), RHSExpr->getBeginLoc(), RHSExpr->getEndLoc());
InitializedEntity Entity =
InitializedEntity::InitializeTemporary(LHSExpr->getType());
InitializationSequence InitSeq(*this, Entity, Kind, RHSExpr);
@@ -12035,7 +12292,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
// OpenCLC v2.0 s6.13.11.1 allows atomic variables to be initialized by
// the ATOMIC_VAR_INIT macro.
if (LHSTy->isAtomicType() || RHSTy->isAtomicType()) {
- SourceRange SR(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ SourceRange SR(LHSExpr->getBeginLoc(), RHSExpr->getEndLoc());
if (BO_Assign == Opc)
Diag(OpLoc, diag::err_opencl_atomic_init) << 0 << SR;
else
@@ -12197,11 +12454,13 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
&Context.Idents.get("object_setClass"),
SourceLocation(), LookupOrdinaryName);
if (ObjectSetClass && isa<ObjCIsaExpr>(LHS.get())) {
- SourceLocation RHSLocEnd = getLocForEndOfToken(RHS.get()->getLocEnd());
- Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign) <<
- FixItHint::CreateInsertion(LHS.get()->getLocStart(), "object_setClass(") <<
- FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc), ",") <<
- FixItHint::CreateInsertion(RHSLocEnd, ")");
+ SourceLocation RHSLocEnd = getLocForEndOfToken(RHS.get()->getEndLoc());
+ Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign)
+ << FixItHint::CreateInsertion(LHS.get()->getBeginLoc(),
+ "object_setClass(")
+ << FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc),
+ ",")
+ << FixItHint::CreateInsertion(RHSLocEnd, ")");
}
else
Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign);
@@ -12258,13 +12517,14 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
if (isLeftBitwise || isRightBitwise)
return;
- SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(),
- OpLoc)
- : SourceRange(OpLoc, RHSExpr->getLocEnd());
+ SourceRange DiagRange = isLeftComp
+ ? SourceRange(LHSExpr->getBeginLoc(), OpLoc)
+ : SourceRange(OpLoc, RHSExpr->getEndLoc());
StringRef OpStr = isLeftComp ? LHSBO->getOpcodeStr() : RHSBO->getOpcodeStr();
- SourceRange ParensRange = isLeftComp ?
- SourceRange(LHSBO->getRHS()->getLocStart(), RHSExpr->getLocEnd())
- : SourceRange(LHSExpr->getLocStart(), RHSBO->getLHS()->getLocEnd());
+ SourceRange ParensRange =
+ isLeftComp
+ ? SourceRange(LHSBO->getRHS()->getBeginLoc(), RHSExpr->getEndLoc())
+ : SourceRange(LHSExpr->getBeginLoc(), RHSBO->getLHS()->getEndLoc());
Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
<< DiagRange << BinaryOperator::getOpcodeStr(Opc) << OpStr;
@@ -12398,10 +12658,9 @@ static void DiagnoseShiftCompare(Sema &S, SourceLocation OpLoc,
S.PDiag(diag::note_precedence_silence)
<< (Kind == OO_LessLess ? "<<" : ">>"),
OCE->getSourceRange());
- SuggestParentheses(S, OpLoc,
- S.PDiag(diag::note_evaluate_comparison_first),
- SourceRange(OCE->getArg(1)->getLocStart(),
- RHSExpr->getLocEnd()));
+ SuggestParentheses(
+ S, OpLoc, S.PDiag(diag::note_evaluate_comparison_first),
+ SourceRange(OCE->getArg(1)->getBeginLoc(), RHSExpr->getEndLoc()));
}
/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
@@ -12643,6 +12902,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
case UO_AddrOf:
resultType = CheckAddressOfOperand(Input, OpLoc);
+ CheckAddressOfNoDeref(InputExpr);
RecordModifiableNonNullParam(*this, InputExpr);
break;
case UO_Deref: {
@@ -12807,6 +13067,11 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
auto *UO = new (Context)
UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc, CanOverflow);
+
+ if (Opc == UO_Deref && UO->getType()->hasAttr(attr::NoDeref) &&
+ !isa<ArrayType>(UO->getType().getDesugaredType(Context)))
+ ExprEvalContexts.back().PossibleDerefs.insert(UO);
+
// Convert the result back to a half vector.
if (ConvertHalfVec)
return convertVector(UO, Context.HalfTy, *this);
@@ -13071,9 +13336,9 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
// FIXME: An integral constant expression?
if (!Idx->isTypeDependent() && !Idx->isValueDependent() &&
!Idx->getType()->isIntegerType())
- return ExprError(Diag(Idx->getLocStart(),
- diag::err_typecheck_subscript_not_integer)
- << Idx->getSourceRange());
+ return ExprError(
+ Diag(Idx->getBeginLoc(), diag::err_typecheck_subscript_not_integer)
+ << Idx->getSourceRange());
// Record this array index.
Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd));
@@ -13294,7 +13559,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
// Drop the parameters.
FunctionProtoType::ExtProtoInfo EPI;
EPI.HasTrailingReturn = false;
- EPI.TypeQuals |= DeclSpec::TQ_const;
+ EPI.TypeQuals.addConst();
T = Context.getFunctionType(Context.DependentTy, None, EPI);
Sig = Context.getTrivialTypeSourceInfo(T);
}
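// --- Illustrative sketch (not part of the patch) ---------------------------
// ExtProtoInfo::TypeQuals is now a clang::Qualifiers value rather than an
// unsigned bitmask, so const-ness is added with addConst() instead of OR-ing
// DeclSpec::TQ_const. Hedged sketch; assumes the Clang AST headers.
#include "clang/AST/Type.h"

static clang::Qualifiers constOnly() {
  clang::Qualifiers Quals; // default-constructed: no qualifiers
  Quals.addConst();        // the equivalent of the old "|= TQ_const"
  return Quals;
}
// ---------------------------------------------------------------------------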
@@ -13365,7 +13630,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
} else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) {
for (const auto &I : Fn->param_types()) {
ParmVarDecl *Param = BuildParmVarDeclForTypedef(
- CurBlock->TheDecl, ParamInfo.getLocStart(), I);
+ CurBlock->TheDecl, ParamInfo.getBeginLoc(), I);
Params.push_back(Param);
}
}
@@ -13421,6 +13686,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
PopExpressionEvaluationContext();
BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
+ BlockDecl *BD = BSI->TheDecl;
if (BSI->HasImplicitReturnType)
deduceClosureReturnType(*BSI);
@@ -13431,7 +13697,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (!BSI->ReturnType.isNull())
RetTy = BSI->ReturnType;
- bool NoReturn = BSI->TheDecl->hasAttr<NoReturnAttr>();
+ bool NoReturn = BD->hasAttr<NoReturnAttr>();
QualType BlockTy;
// Set the captured variables on the block.
@@ -13444,7 +13710,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
Cap.isNested(), Cap.getInitExpr());
Captures.push_back(NewCap);
}
- BSI->TheDecl->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
+ BD->setCaptures(Context, Captures, BSI->CXXThisCaptureIndex != 0);
// If the user wrote a function type in some form, try to use that.
if (!BSI->FunctionType.isNull()) {
@@ -13469,7 +13735,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
} else {
const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals = 0; // FIXME: silently?
+ EPI.TypeQuals = Qualifiers();
EPI.ExtInfo = Ext;
BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI);
}
@@ -13481,7 +13747,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BlockTy = Context.getFunctionType(RetTy, None, EPI);
}
- DiagnoseUnusedParameters(BSI->TheDecl->parameters());
+ DiagnoseUnusedParameters(BD->parameters());
BlockTy = Context.getBlockPointerType(BlockTy);
// If needed, diagnose invalid gotos and switches in the block.
@@ -13489,19 +13755,19 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
!PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
- BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
+ BD->setBody(cast<CompoundStmt>(Body));
if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
- DiagnoseUnguardedAvailabilityViolations(BSI->TheDecl);
+ DiagnoseUnguardedAvailabilityViolations(BD);
// Try to apply the named return value optimization. We have to check again
// if we can do this, though, because blocks keep return statements around
// to deduce an implicit return type.
if (getLangOpts().CPlusPlus && RetTy->isRecordType() &&
- !BSI->TheDecl->isDependentContext())
+ !BD->isDependentContext())
computeNRVO(Body, BSI);
- BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
+ BlockExpr *Result = new (Context) BlockExpr(BD, BlockTy);
AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
@@ -13523,6 +13789,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
}
}
+ if (getCurFunction())
+ getCurFunction()->addBlock(BD);
+
return Result;
}
@@ -13544,7 +13813,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
if (const FunctionDecl *F = dyn_cast<FunctionDecl>(CurContext)) {
CUDAFunctionTarget T = IdentifyCUDATarget(F);
if (T == CFT_Global || T == CFT_Device || T == CFT_HostDevice)
- return ExprError(Diag(E->getLocStart(), diag::err_va_arg_in_device));
+ return ExprError(Diag(E->getBeginLoc(), diag::err_va_arg_in_device));
}
}
@@ -13594,9 +13863,10 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
if (!IsMS && !E->isTypeDependent() &&
!Context.hasSameType(VaListType, E->getType()))
- return ExprError(Diag(E->getLocStart(),
- diag::err_first_argument_to_va_arg_not_of_type_va_list)
- << OrigExpr->getType() << E->getSourceRange());
+ return ExprError(
+ Diag(E->getBeginLoc(),
+ diag::err_first_argument_to_va_arg_not_of_type_va_list)
+ << OrigExpr->getType() << E->getSourceRange());
if (!TInfo->getType()->isDependentType()) {
if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
@@ -13661,7 +13931,7 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
bool Diagnose) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
@@ -13687,9 +13957,9 @@ bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp,
if (!SL || !SL->isAscii())
return false;
if (Diagnose) {
- Diag(SL->getLocStart(), diag::err_missing_atsign_prefix)
- << FixItHint::CreateInsertion(SL->getLocStart(), "@");
- Exp = BuildObjCStringLiteral(SL->getLocStart(), SL).get();
+ Diag(SL->getBeginLoc(), diag::err_missing_atsign_prefix)
+ << FixItHint::CreateInsertion(SL->getBeginLoc(), "@");
+ Exp = BuildObjCStringLiteral(SL->getBeginLoc(), SL).get();
}
return true;
}
@@ -13710,7 +13980,7 @@ static bool maybeDiagnoseAssignmentToFunction(Sema &S, QualType DstType,
return !S.checkAddressOfFunctionIsAvailable(FD,
/*Complain=*/true,
- SrcExpr->getLocStart());
+ SrcExpr->getBeginLoc());
}
bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
@@ -13963,7 +14233,7 @@ ExprResult
Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold) {
- SourceLocation DiagLoc = E->getLocStart();
+ SourceLocation DiagLoc = E->getBeginLoc();
if (getLangOpts().CPlusPlus11) {
// C++11 [expr.const]p5:
@@ -14030,11 +14300,14 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
return ExprError();
}
+ if (!isa<ConstantExpr>(E))
+ E = ConstantExpr::Create(Context, E);
+
// Circumvent ICE checking in C++11 to avoid evaluating the expression twice
// in the non-ICE case.
if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) {
if (Result)
- *Result = E->EvaluateKnownConstInt(Context);
+ *Result = E->EvaluateKnownConstIntCheckOverflow(Context);
return E;
}
@@ -14165,6 +14438,51 @@ Sema::PushExpressionEvaluationContext(
PushExpressionEvaluationContext(NewContext, ClosureContextDecl, ExprContext);
}
+namespace {
+
+const DeclRefExpr *CheckPossibleDeref(Sema &S, const Expr *PossibleDeref) {
+ PossibleDeref = PossibleDeref->IgnoreParenImpCasts();
+ if (const auto *E = dyn_cast<UnaryOperator>(PossibleDeref)) {
+ if (E->getOpcode() == UO_Deref)
+ return CheckPossibleDeref(S, E->getSubExpr());
+ } else if (const auto *E = dyn_cast<ArraySubscriptExpr>(PossibleDeref)) {
+ return CheckPossibleDeref(S, E->getBase());
+ } else if (const auto *E = dyn_cast<MemberExpr>(PossibleDeref)) {
+ return CheckPossibleDeref(S, E->getBase());
+ } else if (const auto *E = dyn_cast<DeclRefExpr>(PossibleDeref)) {
+ QualType Inner;
+ QualType Ty = E->getType();
+ if (const auto *Ptr = Ty->getAs<PointerType>())
+ Inner = Ptr->getPointeeType();
+ else if (const auto *Arr = S.Context.getAsArrayType(Ty))
+ Inner = Arr->getElementType();
+ else
+ return nullptr;
+
+ if (Inner->hasAttr(attr::NoDeref))
+ return E;
+ }
+ return nullptr;
+}
+
+} // namespace
+
+void Sema::WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec) {
+ for (const Expr *E : Rec.PossibleDerefs) {
+ const DeclRefExpr *DeclRef = CheckPossibleDeref(*this, E);
+ if (DeclRef) {
+ const ValueDecl *Decl = DeclRef->getDecl();
+ Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type)
+ << Decl->getName() << E->getSourceRange();
+ Diag(Decl->getLocation(), diag::note_previous_decl) << Decl->getName();
+ } else {
+ Diag(E->getExprLoc(), diag::warn_dereference_of_noderef_type_no_decl)
+ << E->getSourceRange();
+ }
+ }
+ Rec.PossibleDerefs.clear();
+}
+
void Sema::PopExpressionEvaluationContext() {
ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
unsigned NumTypos = Rec.NumTypos;
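// --- Illustrative sketch (not part of the patch) ---------------------------
// User-level code of the kind the new PossibleDerefs bookkeeping above
// diagnoses, assuming the 'noderef' type-attribute spelling this warning
// targets. The function name is invented.
int readThroughNoDeref(int __attribute__((noderef)) *P) {
  return *P; // triggers the new noderef-dereference warning
}
// ---------------------------------------------------------------------------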
@@ -14193,7 +14511,7 @@ void Sema::PopExpressionEvaluationContext() {
llvm_unreachable("Couldn't infer lambda error message.");
for (const auto *L : Rec.Lambdas)
- Diag(L->getLocStart(), D);
+ Diag(L->getBeginLoc(), D);
} else {
// Mark the capture expressions odr-used. This was deferred
// during lambda expression creation.
@@ -14204,6 +14522,8 @@ void Sema::PopExpressionEvaluationContext() {
}
}
+ WarnOnPendingNoDerefs(Rec);
+
// When are coming out of an unevaluated context, clear out any
// temporaries that we may have created as part of the evaluation of
// the expression in that context: they aren't relevant because they
@@ -14224,11 +14544,8 @@ void Sema::PopExpressionEvaluationContext() {
// Pop the current expression evaluation context off the stack.
ExprEvalContexts.pop_back();
- if (!ExprEvalContexts.empty())
- ExprEvalContexts.back().NumTypos += NumTypos;
- else
- assert(NumTypos == 0 && "There are outstanding typos after popping the "
- "last ExpressionEvaluationContextRecord");
+ // The global expression evaluation context record is never popped.
+ ExprEvalContexts.back().NumTypos += NumTypos;
}
void Sema::DiscardCleanupsInEvaluationContext() {
@@ -14240,6 +14557,10 @@ void Sema::DiscardCleanupsInEvaluationContext() {
}
ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) {
+ ExprResult Result = CheckPlaceholderExpr(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.get();
if (!E->getType()->isVariablyModifiedType())
return E;
return TransformToPotentiallyEvaluated(E);
@@ -14641,8 +14962,10 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
Expr *CopyExpr = nullptr;
bool ByRef = false;
- // Blocks are not allowed to capture arrays.
- if (CaptureType->isArrayType()) {
+ // Blocks are not allowed to capture arrays, excepting OpenCL.
+ // OpenCL v2.0 s1.12.5 (revision 40): arrays are captured by reference
+ // (decayed to pointers).
+ if (!S.getLangOpts().OpenCL && CaptureType->isArrayType()) {
if (BuildAndDiagnose) {
S.Diag(Loc, diag::err_ref_array_type);
S.Diag(Var->getLocation(), diag::note_previous_decl)
@@ -14665,15 +14988,15 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
// Warn about implicitly autoreleasing indirect parameters captured by blocks.
if (const auto *PT = CaptureType->getAs<PointerType>()) {
// This function finds out whether there is an AttributedType of kind
- // attr_objc_ownership in Ty. The existence of AttributedType of kind
- // attr_objc_ownership implies __autoreleasing was explicitly specified
+ // attr::ObjCOwnership in Ty. The existence of AttributedType of kind
+ // attr::ObjCOwnership implies __autoreleasing was explicitly specified
// rather than being added implicitly by the compiler.
auto IsObjCOwnershipAttributedType = [](QualType Ty) {
while (const auto *AttrTy = Ty->getAs<AttributedType>()) {
- if (AttrTy->getAttrKind() == AttributedType::attr_objc_ownership)
+ if (AttrTy->getAttrKind() == attr::ObjCOwnership)
return true;
- // Peel off AttributedTypes that are not of kind objc_ownership.
+ // Peel off AttributedTypes that are not of kind ObjCOwnership.
Ty = AttrTy->getModifiedType();
}
@@ -14722,9 +15045,8 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
// According to the blocks spec, the capture of a variable from
// the stack requires a const copy constructor. This is not true
// of the copy/move done to move a __block variable to the heap.
- Expr *DeclRef = new (S.Context) DeclRefExpr(Var, Nested,
- DeclRefType.withConst(),
- VK_LValue, Loc);
+ Expr *DeclRef = new (S.Context) DeclRefExpr(
+ S.Context, Var, Nested, DeclRefType.withConst(), VK_LValue, Loc);
ExprResult Result
= S.PerformCopyInitialization(
@@ -14800,8 +15122,8 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP)
S.setOpenMPCaptureKind(Field, Var, RSI->OpenMPLevel);
- CopyExpr = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable,
- DeclRefType, VK_LValue, Loc);
+ CopyExpr = new (S.Context) DeclRefExpr(
+ S.Context, Var, RefersToCapturedVariable, DeclRefType, VK_LValue, Loc);
Var->setReferenced(true);
Var->markUsed(S.Context);
}
@@ -14828,6 +15150,21 @@ static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI,
= FieldDecl::Create(S.Context, Lambda, Loc, Loc, nullptr, FieldType,
S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
nullptr, false, ICIS_NoInit);
+ // If the variable being captured has an invalid type, mark the lambda class
+ // as invalid as well.
+ if (!FieldType->isDependentType()) {
+ if (S.RequireCompleteType(Loc, FieldType, diag::err_field_incomplete)) {
+ Lambda->setInvalidDecl();
+ Field->setInvalidDecl();
+ } else {
+ NamedDecl *Def;
+ FieldType->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ Lambda->setInvalidDecl();
+ Field->setInvalidDecl();
+ }
+ }
+ }
Field->setImplicit(true);
Field->setAccess(AS_private);
Lambda->addDecl(Field);
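// --- Illustrative sketch (not part of the patch) ---------------------------
// Hypothetical user code for the case the added check covers: copy-capturing a
// variable of incomplete type. The capture field cannot be completed, so the
// lambda class and the field are now marked invalid instead of being left
// half-built. This snippet is intentionally ill-formed.
struct Incomplete; // declared, never defined

void capture(Incomplete &R) {
  auto L = [R] {}; // error: field has incomplete type 'Incomplete'
  (void)L;
}
// ---------------------------------------------------------------------------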
@@ -15023,7 +15360,7 @@ bool Sema::tryCaptureVariable(
Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName();
Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
- Diag(LSI->Lambda->getLocStart(), diag::note_lambda_decl);
+ Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl);
} else
diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC);
}
@@ -15077,7 +15414,7 @@ bool Sema::tryCaptureVariable(
Diag(Var->getLocation(), diag::note_previous_decl)
<< Var->getDeclName();
if (cast<LambdaScopeInfo>(CSI)->Lambda)
- Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getLocStart(),
+ Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getBeginLoc(),
diag::note_lambda_decl);
// FIXME: If we error out because an outer lambda can not implicitly
// capture a variable that an inner lambda explicitly captures, we
@@ -15435,8 +15772,8 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
if (Method->isPure())
MightBeOdrUse = false;
}
- SourceLocation Loc = E->getMemberLoc().isValid() ?
- E->getMemberLoc() : E->getLocStart();
+ SourceLocation Loc =
+ E->getMemberLoc().isValid() ? E->getMemberLoc() : E->getBeginLoc();
MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse);
}
@@ -15532,34 +15869,34 @@ namespace {
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
- S.MarkFunctionReferenced(E->getLocStart(),
- const_cast<CXXDestructorDecl*>(E->getTemporary()->getDestructor()));
+ S.MarkFunctionReferenced(
+ E->getBeginLoc(),
+ const_cast<CXXDestructorDecl *>(E->getTemporary()->getDestructor()));
Visit(E->getSubExpr());
}
void VisitCXXNewExpr(CXXNewExpr *E) {
if (E->getOperatorNew())
- S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorNew());
+ S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorNew());
if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
Inherited::VisitCXXNewExpr(E);
}
void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (E->getOperatorDelete())
- S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ S.MarkFunctionReferenced(E->getBeginLoc(), E->getOperatorDelete());
QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- S.MarkFunctionReferenced(E->getLocStart(),
- S.LookupDestructor(Record));
+ S.MarkFunctionReferenced(E->getBeginLoc(), S.LookupDestructor(Record));
}
Inherited::VisitCXXDeleteExpr(E);
}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
- S.MarkFunctionReferenced(E->getLocStart(), E->getConstructor());
+ S.MarkFunctionReferenced(E->getBeginLoc(), E->getConstructor());
Inherited::VisitCXXConstructExpr(E);
}
@@ -15730,7 +16067,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
Diag(Loc, diagnostic) << E->getSourceRange();
- SourceLocation Open = E->getLocStart();
+ SourceLocation Open = E->getBeginLoc();
SourceLocation Close = getLocForEndOfToken(E->getSourceRange().getEnd());
Diag(Loc, diag::note_condition_assign_silence)
<< FixItHint::CreateInsertion(Open, "(")
@@ -15748,7 +16085,7 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
/// that the user intended an assignment used as condition.
void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
// Don't warn if the parens came from a macro.
- SourceLocation parenLoc = ParenE->getLocStart();
+ SourceLocation parenLoc = ParenE->getBeginLoc();
if (parenLoc.isInvalid() || parenLoc.isMacroID())
return;
// Don't warn for dependent expressions.
@@ -16211,7 +16548,7 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) {
SourceLocation Loc = FD->getLocation();
- FunctionDecl *NewFD = FunctionDecl::Create(FD->getASTContext(),
+ FunctionDecl *NewFD = FunctionDecl::Create(S.Context,
FD->getDeclContext(),
Loc, Loc, FD->getNameInfo().getName(),
DestType, FD->getTypeSourceInfo(),
@@ -16439,25 +16776,29 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
auto *FD = cast<FunctionDecl>(DRE->getDecl());
if (FD->getBuiltinID() == Builtin::BI__noop) {
E = ImpCastExprToType(E, Context.getPointerType(FD->getType()),
- CK_BuiltinFnToFnPtr).get();
- return new (Context) CallExpr(Context, E, None, Context.IntTy,
- VK_RValue, SourceLocation());
+ CK_BuiltinFnToFnPtr)
+ .get();
+ return CallExpr::Create(Context, E, /*Args=*/{}, Context.IntTy,
+ VK_RValue, SourceLocation());
}
}
- Diag(E->getLocStart(), diag::err_builtin_fn_use);
+ Diag(E->getBeginLoc(), diag::err_builtin_fn_use);
return ExprError();
}
// Expressions of unknown type.
case BuiltinType::OMPArraySection:
- Diag(E->getLocStart(), diag::err_omp_array_section_use);
+ Diag(E->getBeginLoc(), diag::err_omp_array_section_use);
return ExprError();
// Everything else should be impossible.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index a1168fa34d56..8c89a3cee3db 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -113,9 +113,15 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II,
break;
}
}
- if (!InjectedClassName && CurClass->isInvalidDecl())
+ if (!InjectedClassName) {
+ if (!CurClass->isInvalidDecl()) {
+ // FIXME: RequireCompleteDeclContext doesn't check dependent contexts
+ // properly. Work around it here for now.
+ Diag(SS.getLastQualifierNameLoc(),
+ diag::err_incomplete_nested_name_spec) << CurClass << SS.getRange();
+ }
return ParsedType();
- assert(InjectedClassName && "couldn't find injected class name");
+ }
QualType T = Context.getTypeDeclType(InjectedClassName);
DiagnoseUseOfDecl(InjectedClassName, NameLoc);
@@ -413,8 +419,8 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
// namespace scope. Therefore, this unqualified-id cannot name anything.
// Reject it early, because we have no AST representation for this in the
// case where the scope is dependent.
- Diag(Name.getLocStart(), diag::err_literal_operator_id_outside_namespace)
- << SS.getScopeRep();
+ Diag(Name.getBeginLoc(), diag::err_literal_operator_id_outside_namespace)
+ << SS.getScopeRep();
return true;
case NestedNameSpecifier::Global:
@@ -1058,7 +1064,7 @@ QualType Sema::getCurrentThisType() {
if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
if (method && method->isInstance())
- ThisTy = method->getThisType(Context);
+ ThisTy = method->getThisType();
}
if (ThisTy.isNull() && isLambdaCallOperator(CurContext) &&
@@ -1088,7 +1094,7 @@ QualType Sema::getCurrentThisType() {
Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
Decl *ContextDecl,
- unsigned CXXThisTypeQuals,
+ Qualifiers CXXThisTypeQuals,
bool Enabled)
: S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
{
@@ -1101,11 +1107,10 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
else
Record = cast<CXXRecordDecl>(ContextDecl);
- // We care only for CVR qualifiers here, so cut everything else.
- CXXThisTypeQuals &= Qualifiers::FastMask;
- S.CXXThisTypeOverride
- = S.Context.getPointerType(
- S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals));
+ QualType T = S.Context.getRecordType(Record);
+ T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
+
+ S.CXXThisTypeOverride = S.Context.getPointerType(T);
this->Enabled = true;
}
@@ -1442,11 +1447,33 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
return Result;
}
+bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
+ // [CUDA] Ignore this function, if we can't call it.
+ const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ if (getLangOpts().CUDA &&
+ IdentifyCUDAPreference(Caller, Method) <= CFP_WrongSide)
+ return false;
+
+ SmallVector<const FunctionDecl*, 4> PreventedBy;
+ bool Result = Method->isUsualDeallocationFunction(PreventedBy);
+
+ if (Result || !getLangOpts().CUDA || PreventedBy.empty())
+ return Result;
+
+ // In case of CUDA, return true if none of the 1-argument deallocator
+ // functions are actually callable.
+ return llvm::none_of(PreventedBy, [&](const FunctionDecl *FD) {
+ assert(FD->getNumParams() == 1 &&
+ "Only single-operand functions should be in PreventedBy");
+ return IdentifyCUDAPreference(Caller, FD) >= CFP_HostDevice;
+ });
+}
+
/// Determine whether the given function is a non-placement
/// deallocation function.
static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
- return Method->isUsualDeallocationFunction();
+ return S.isUsualDeallocationFunction(Method);
if (FD->getOverloadedOperator() != OO_Delete &&
FD->getOverloadedOperator() != OO_Array_Delete)
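// --- Illustrative sketch (not part of the patch) ---------------------------
// The CUDA branch above answers "usual deallocation function?" with
// llvm::none_of over the candidates that prevented it. A minimal standalone
// use of the same helper; the values and predicate are invented.
#include "llvm/ADT/STLExtras.h"
#include <vector>

static bool noCallableCandidate(const std::vector<int> &Preferences) {
  // True only if no element satisfies the predicate.
  return llvm::none_of(Preferences, [](int P) { return P >= 3; });
}
// ---------------------------------------------------------------------------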
@@ -1484,11 +1511,19 @@ namespace {
Destroying = true;
++NumBaseParams;
}
- if (FD->getNumParams() == NumBaseParams + 2)
- HasAlignValT = HasSizeT = true;
- else if (FD->getNumParams() == NumBaseParams + 1) {
- HasSizeT = FD->getParamDecl(NumBaseParams)->getType()->isIntegerType();
- HasAlignValT = !HasSizeT;
+
+ if (NumBaseParams < FD->getNumParams() &&
+ S.Context.hasSameUnqualifiedType(
+ FD->getParamDecl(NumBaseParams)->getType(),
+ S.Context.getSizeType())) {
+ ++NumBaseParams;
+ HasSizeT = true;
+ }
+
+ if (NumBaseParams < FD->getNumParams() &&
+ FD->getParamDecl(NumBaseParams)->getType()->isAlignValT()) {
+ ++NumBaseParams;
+ HasAlignValT = true;
}
// In CUDA, determine how much we'd like / dislike to call this.
@@ -1692,15 +1727,9 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
DirectInitRange = List->getSourceRange();
- return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal,
- PlacementLParen,
- PlacementArgs,
- PlacementRParen,
- TypeIdParens,
- AllocType,
- TInfo,
- ArraySize,
- DirectInitRange,
+ return BuildCXXNew(SourceRange(StartLoc, D.getEndLoc()), UseGlobal,
+ PlacementLParen, PlacementArgs, PlacementRParen,
+ TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange,
Initializer);
}
@@ -1723,28 +1752,33 @@ static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
return false;
}
-// Emit a diagnostic if an aligned allocation/deallocation function that is not
-// implemented in the standard library is selected.
-static void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
- SourceLocation Loc, bool IsDelete,
- Sema &S) {
- if (!S.getLangOpts().AlignedAllocationUnavailable)
- return;
-
- // Return if there is a definition.
+bool
+Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
+ if (!getLangOpts().AlignedAllocationUnavailable)
+ return false;
if (FD.isDefined())
- return;
-
+ return false;
bool IsAligned = false;
- if (FD.isReplaceableGlobalAllocationFunction(&IsAligned) && IsAligned) {
- const llvm::Triple &T = S.getASTContext().getTargetInfo().getTriple();
+ if (FD.isReplaceableGlobalAllocationFunction(&IsAligned) && IsAligned)
+ return true;
+ return false;
+}
+
+// Emit a diagnostic if an aligned allocation/deallocation function that is not
+// implemented in the standard library is selected.
+void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
+ SourceLocation Loc) {
+ if (isUnavailableAlignedAllocationFunction(FD)) {
+ const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
- S.getASTContext().getTargetInfo().getPlatformName());
+ getASTContext().getTargetInfo().getPlatformName());
- S.Diag(Loc, diag::warn_aligned_allocation_unavailable)
- << IsDelete << FD.getType().getAsString() << OSName
- << alignedAllocMinVersion(T.getOS()).getAsString();
- S.Diag(Loc, diag::note_silence_unligned_allocation_unavailable);
+ OverloadedOperatorKind Kind = FD.getDeclName().getCXXOverloadedOperator();
+ bool IsDelete = Kind == OO_Delete || Kind == OO_Array_Delete;
+ Diag(Loc, diag::err_aligned_allocation_unavailable)
+ << IsDelete << FD.getType().getAsString() << OSName
+ << alignedAllocMinVersion(T.getOS()).getAsString();
+ Diag(Loc, diag::note_silence_aligned_allocation_unavailable);
}
}
@@ -1787,20 +1821,21 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// A new-expression that creates an object of type T initializes that
// object as follows:
InitializationKind Kind
- // - If the new-initializer is omitted, the object is default-
- // initialized (8.5); if no initialization is performed,
- // the object has indeterminate value
- = initStyle == CXXNewExpr::NoInit
- ? InitializationKind::CreateDefault(TypeRange.getBegin())
- // - Otherwise, the new-initializer is interpreted according to the
- // initialization rules of 8.5 for direct-initialization.
- : initStyle == CXXNewExpr::ListInit
- ? InitializationKind::CreateDirectList(TypeRange.getBegin(),
- Initializer->getLocStart(),
- Initializer->getLocEnd())
- : InitializationKind::CreateDirect(TypeRange.getBegin(),
- DirectInitRange.getBegin(),
- DirectInitRange.getEnd());
+ // - If the new-initializer is omitted, the object is default-
+ // initialized (8.5); if no initialization is performed,
+ // the object has indeterminate value
+ = initStyle == CXXNewExpr::NoInit
+ ? InitializationKind::CreateDefault(TypeRange.getBegin())
+ // - Otherwise, the new-initializer is interpreted according to the
+ // initialization rules of 8.5 for direct-initialization.
+ : initStyle == CXXNewExpr::ListInit
+ ? InitializationKind::CreateDirectList(
+ TypeRange.getBegin(), Initializer->getBeginLoc(),
+ Initializer->getEndLoc())
+ : InitializationKind::CreateDirect(TypeRange.getBegin(),
+ DirectInitRange.getBegin(),
+ DirectInitRange.getEnd());
// C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
auto *Deduced = AllocType->getContainedDeducedType();
@@ -1831,19 +1866,18 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
<< AllocType << TypeRange);
if (NumInits > 1) {
Expr *FirstBad = Inits[1];
- return ExprError(Diag(FirstBad->getLocStart(),
+ return ExprError(Diag(FirstBad->getBeginLoc(),
diag::err_auto_new_ctor_multiple_expressions)
<< AllocType << TypeRange);
}
if (Braced && !getLangOpts().CPlusPlus17)
- Diag(Initializer->getLocStart(), diag::ext_auto_new_list_init)
+ Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
<< AllocType << TypeRange;
- Expr *Deduce = Inits[0];
QualType DeducedType;
- if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
+ if (DeduceAutoType(AllocTypeInfo, Inits[0], DeducedType) == DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
- << AllocType << Deduce->getType()
- << TypeRange << Deduce->getSourceRange());
+ << AllocType << Inits[0]->getType()
+ << TypeRange << Inits[0]->getSourceRange());
if (DeducedType.isNull())
return ExprError();
AllocType = DeducedType;
@@ -1983,7 +2017,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// C++14 onwards, because Value is always unsigned here!
if (ArraySize->isIntegerConstantExpr(Value, Context)) {
if (Value.isSigned() && Value.isNegative()) {
- return ExprError(Diag(ArraySize->getLocStart(),
+ return ExprError(Diag(ArraySize->getBeginLoc(),
diag::err_typecheck_negative_array_size)
<< ArraySize->getSourceRange());
}
@@ -1992,19 +2026,18 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
unsigned ActiveSizeBits =
ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
- return ExprError(Diag(ArraySize->getLocStart(),
- diag::err_array_too_large)
- << Value.toString(10)
- << ArraySize->getSourceRange());
+ return ExprError(
+ Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
+ << Value.toString(10) << ArraySize->getSourceRange());
}
KnownArraySize = Value.getZExtValue();
} else if (TypeIdParens.isValid()) {
// Can't have dynamic array size when the type-id is in parentheses.
- Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst)
- << ArraySize->getSourceRange()
- << FixItHint::CreateRemoval(TypeIdParens.getBegin())
- << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+ Diag(ArraySize->getBeginLoc(), diag::ext_new_paren_array_nonconst)
+ << ArraySize->getSourceRange()
+ << FixItHint::CreateRemoval(TypeIdParens.getBegin())
+ << FixItHint::CreateRemoval(TypeIdParens.getEnd());
TypeIdParens = SourceRange();
}
@@ -2066,8 +2099,8 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// global operator new.
if (PlacementArgs.empty() && !PassAlignment &&
(OperatorNew->isImplicit() ||
- (OperatorNew->getLocStart().isValid() &&
- getSourceManager().isInSystemHeader(OperatorNew->getLocStart())))) {
+ (OperatorNew->getBeginLoc().isValid() &&
+ getSourceManager().isInSystemHeader(OperatorNew->getBeginLoc())))) {
if (Alignment > NewAlignment)
Diag(StartLoc, diag::warn_overaligned_type)
<< AllocType
@@ -2080,8 +2113,8 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Initializer lists are also allowed, in C++11. Rely on the parser for the
// dialect distinction.
if (ArraySize && !isLegalArrayNewInitializer(initStyle, Initializer)) {
- SourceRange InitRange(Inits[0]->getLocStart(),
- Inits[NumInits - 1]->getLocEnd());
+ SourceRange InitRange(Inits[0]->getBeginLoc(),
+ Inits[NumInits - 1]->getEndLoc());
Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
return ExprError();
}
@@ -2128,13 +2161,11 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (DiagnoseUseOfDecl(OperatorNew, StartLoc))
return ExprError();
MarkFunctionReferenced(StartLoc, OperatorNew);
- diagnoseUnavailableAlignedAllocation(*OperatorNew, StartLoc, false, *this);
}
if (OperatorDelete) {
if (DiagnoseUseOfDecl(OperatorDelete, StartLoc))
return ExprError();
MarkFunctionReferenced(StartLoc, OperatorDelete);
- diagnoseUnavailableAlignedAllocation(*OperatorDelete, StartLoc, true, *this);
}
// C++0x [expr.new]p17:
@@ -2155,11 +2186,11 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
}
- return new (Context)
- CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete, PassAlignment,
- UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens,
- ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo,
- Range, DirectInitRange);
+ return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
+ PassAlignment, UsualArrayDeleteWantsSize,
+ PlacementArgs, TypeIdParens, ArraySize, initStyle,
+ Initializer, ResultType, AllocTypeInfo, Range,
+ DirectInitRange);
}
/// Checks that a type is suitable as the allocated type
@@ -2587,8 +2618,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
if (IsSizedDelete) {
SourceRange R = PlaceArgs.empty()
? SourceRange()
- : SourceRange(PlaceArgs.front()->getLocStart(),
- PlaceArgs.back()->getLocEnd());
+ : SourceRange(PlaceArgs.front()->getBeginLoc(),
+ PlaceArgs.back()->getEndLoc());
Diag(StartLoc, diag::err_placement_new_non_placement_delete) << R;
if (!OperatorDelete->isImplicit())
Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
@@ -2794,9 +2825,10 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
// Global allocation functions should always be visible.
Alloc->setVisibleDespiteOwningModule();
- // Implicit sized deallocation functions always have default visibility.
- Alloc->addAttr(
- VisibilityAttr::CreateImplicit(Context, VisibilityAttr::Default));
+ Alloc->addAttr(VisibilityAttr::CreateImplicit(
+ Context, LangOpts.GlobalAllocationFunctionVisibilityHidden
+ ? VisibilityAttr::Hidden
+ : VisibilityAttr::Default));
llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
for (QualType T : Params) {
@@ -3156,12 +3188,12 @@ void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
switch (Detector.analyzeDeleteExpr(DE)) {
case MismatchingNewDeleteDetector::VarInitMismatches:
case MismatchingNewDeleteDetector::MemberInitMismatches: {
- DiagnoseMismatchedNewDelete(*this, DE->getLocStart(), Detector);
+ DiagnoseMismatchedNewDelete(*this, DE->getBeginLoc(), Detector);
break;
}
case MismatchingNewDeleteDetector::AnalyzeLater: {
DeleteExprs[Detector.Field].push_back(
- std::make_pair(DE->getLocStart(), DE->isArrayForm()));
+ std::make_pair(DE->getBeginLoc(), DE->isArrayForm()));
break;
}
case MismatchingNewDeleteDetector::NoMismatch:
@@ -3280,10 +3312,10 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (Pointee.getAddressSpace() != LangAS::Default &&
!getLangOpts().OpenCLCPlusPlus)
- return Diag(Ex.get()->getLocStart(),
+ return Diag(Ex.get()->getBeginLoc(),
diag::err_address_space_qualified_delete)
- << Pointee.getUnqualifiedType()
- << Pointee.getQualifiers().getAddressSpaceAttributePrintValue();
+ << Pointee.getUnqualifiedType()
+ << Pointee.getQualifiers().getAddressSpaceAttributePrintValue();
CXXRecordDecl *PointeeRD = nullptr;
if (Pointee->isVoidType() && !isSFINAEContext()) {
@@ -3383,8 +3415,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
}
}
- diagnoseUnavailableAlignedAllocation(*OperatorDelete, StartLoc, true,
- *this);
+ DiagnoseUseOfDecl(OperatorDelete, StartLoc);
// Convert the operand to the type of the first parameter of operator
// delete. This is only necessary if we selected a destroying operator
@@ -3421,7 +3452,7 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
DeclarationName NewName = S.Context.DeclarationNames.getCXXOperatorName(
IsDelete ? OO_Delete : OO_New);
- LookupResult R(S, NewName, TheCall->getLocStart(), Sema::LookupOrdinaryName);
+ LookupResult R(S, NewName, TheCall->getBeginLoc(), Sema::LookupOrdinaryName);
S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
assert(!R.empty() && "implicitly declared allocation functions not found");
assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
@@ -3517,13 +3548,16 @@ Sema::SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
return ExprError();
assert(OperatorNewOrDelete && "should be found");
+ DiagnoseUseOfDecl(OperatorNewOrDelete, TheCall->getExprLoc());
+ MarkFunctionReferenced(TheCall->getExprLoc(), OperatorNewOrDelete);
+
TheCall->setType(OperatorNewOrDelete->getReturnType());
for (unsigned i = 0; i != TheCall->getNumArgs(); ++i) {
QualType ParamTy = OperatorNewOrDelete->getParamDecl(i)->getType();
InitializedEntity Entity =
InitializedEntity::InitializeParameter(Context, ParamTy, false);
ExprResult Arg = PerformCopyInitialization(
- Entity, TheCall->getArg(i)->getLocStart(), TheCall->getArg(i));
+ Entity, TheCall->getArg(i)->getBeginLoc(), TheCall->getArg(i));
if (Arg.isInvalid())
return ExprError();
TheCall->setArg(i, Arg.get());
@@ -3561,7 +3595,7 @@ void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
if (getSourceManager().isInSystemHeader(PointeeRD->getLocation()))
return;
- QualType ClassType = dtor->getThisType(Context)->getPointeeType();
+ QualType ClassType = dtor->getThisType()->getPointeeType();
if (PointeeRD->isAbstract()) {
// If the class is abstract, we warn by default, because we're
// sure the code has undefined behavior.
@@ -3811,14 +3845,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From = Res.get();
}
- ExprResult CastArg
- = BuildCXXCastArgument(*this,
- From->getLocStart(),
- ToType.getNonReferenceType(),
- CastKind, cast<CXXMethodDecl>(FD),
- ICS.UserDefined.FoundConversionFunction,
- ICS.UserDefined.HadMultipleCandidates,
- From);
+ ExprResult CastArg = BuildCXXCastArgument(
+ *this, From->getBeginLoc(), ToType.getNonReferenceType(), CastKind,
+ cast<CXXMethodDecl>(FD), ICS.UserDefined.FoundConversionFunction,
+ ICS.UserDefined.HadMultipleCandidates, From);
if (CastArg.isInvalid())
return ExprError();
@@ -3906,7 +3936,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (!Fn)
return ExprError();
- if (DiagnoseUseOfDecl(Fn, From->getLocStart()))
+ if (DiagnoseUseOfDecl(Fn, From->getBeginLoc()))
return ExprError();
From = FixOverloadedFunctionReference(From, Found, Fn);
@@ -4045,15 +4075,15 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (SCS.IncompatibleObjC && Action != AA_Casting) {
// Diagnose incompatible Objective-C conversions
if (Action == AA_Initializing || Action == AA_Assigning)
- Diag(From->getLocStart(),
+ Diag(From->getBeginLoc(),
diag::ext_typecheck_convert_incompatible_pointer)
- << ToType << From->getType() << Action
- << From->getSourceRange() << 0;
+ << ToType << From->getType() << Action << From->getSourceRange()
+ << 0;
else
- Diag(From->getLocStart(),
+ Diag(From->getBeginLoc(),
diag::ext_typecheck_convert_incompatible_pointer)
- << From->getType() << ToType << Action
- << From->getSourceRange() << 0;
+ << From->getType() << ToType << Action << From->getSourceRange()
+ << 0;
if (From->getType()->isObjCObjectPointerType() &&
ToType->isObjCObjectPointerType())
@@ -4062,13 +4092,11 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
!CheckObjCARCUnavailableWeakConversion(ToType,
From->getType())) {
if (Action == AA_Initializing)
- Diag(From->getLocStart(),
- diag::err_arc_weak_unavailable_assign);
+ Diag(From->getBeginLoc(), diag::err_arc_weak_unavailable_assign);
else
- Diag(From->getLocStart(),
- diag::err_arc_convesion_of_weak_unavailable)
- << (Action == AA_Casting) << From->getType() << ToType
- << From->getSourceRange();
+ Diag(From->getBeginLoc(), diag::err_arc_convesion_of_weak_unavailable)
+ << (Action == AA_Casting) << From->getType() << ToType
+ << From->getSourceRange();
}
CastKind Kind;
@@ -4124,12 +4152,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Derived_To_Base: {
CXXCastPath BasePath;
- if (CheckDerivedToBaseConversion(From->getType(),
- ToType.getNonReferenceType(),
- From->getLocStart(),
- From->getSourceRange(),
- &BasePath,
- CStyle))
+ if (CheckDerivedToBaseConversion(
+ From->getType(), ToType.getNonReferenceType(), From->getBeginLoc(),
+ From->getSourceRange(), &BasePath, CStyle))
return ExprError();
From = ImpCastExprToType(From, ToType.getNonReferenceType(),
@@ -4223,14 +4248,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
case ICK_Zero_Event_Conversion:
- From = ImpCastExprToType(From, ToType,
- CK_ZeroToOCLEvent,
- From->getValueKind()).get();
- break;
-
case ICK_Zero_Queue_Conversion:
From = ImpCastExprToType(From, ToType,
- CK_ZeroToOCLQueue,
+ CK_ZeroToOCLOpaqueType,
From->getValueKind()).get();
break;
@@ -4263,17 +4283,32 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Qualification: {
// The qualification keeps the category of the inner expression, unless the
// target type isn't a reference.
- ExprValueKind VK = ToType->isReferenceType() ?
- From->getValueKind() : VK_RValue;
- From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
- CK_NoOp, VK, /*BasePath=*/nullptr, CCK).get();
+ ExprValueKind VK =
+ ToType->isReferenceType() ? From->getValueKind() : VK_RValue;
+
+ CastKind CK = CK_NoOp;
+
+ if (ToType->isReferenceType() &&
+ ToType->getPointeeType().getAddressSpace() !=
+ From->getType().getAddressSpace())
+ CK = CK_AddressSpaceConversion;
+
+ if (ToType->isPointerType() &&
+ ToType->getPointeeType().getAddressSpace() !=
+ From->getType()->getPointeeType().getAddressSpace())
+ CK = CK_AddressSpaceConversion;
+
+ From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK, VK,
+ /*BasePath=*/nullptr, CCK)
+ .get();
if (SCS.DeprecatedStringLiteralToCharPtr &&
!getLangOpts().WritableStrings) {
- Diag(From->getLocStart(), getLangOpts().CPlusPlus11
- ? diag::ext_deprecated_string_literal_conversion
- : diag::warn_deprecated_string_literal_conversion)
- << ToType.getNonReferenceType();
+ Diag(From->getBeginLoc(),
+ getLangOpts().CPlusPlus11
+ ? diag::ext_deprecated_string_literal_conversion
+ : diag::warn_deprecated_string_literal_conversion)
+ << ToType.getNonReferenceType();
}
break;
@@ -4296,7 +4331,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// _Nullable type to a _Nonnull one, complain.
if (!isCast(CCK))
diagnoseNullableToNonnullConversion(ToType, InitialFromType,
- From->getLocStart());
+ From->getBeginLoc());
return From;
}
@@ -4926,7 +4961,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (ArgTy->isObjectType() || ArgTy->isFunctionType())
ArgTy = S.Context.getRValueReferenceType(ArgTy);
OpaqueArgExprs.push_back(
- OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ OpaqueValueExpr(Args[I]->getTypeLoc().getBeginLoc(),
ArgTy.getNonLValueExprType(S.Context),
Expr::getValueKindForType(ArgTy)));
}
@@ -5421,10 +5456,10 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
}
CXXCastPath BasePath;
- if (CheckDerivedToBaseConversion(LHSType, Class, Loc,
- SourceRange(LHS.get()->getLocStart(),
- RHS.get()->getLocEnd()),
- &BasePath))
+ if (CheckDerivedToBaseConversion(
+ LHSType, Class, Loc,
+ SourceRange(LHS.get()->getBeginLoc(), RHS.get()->getEndLoc()),
+ &BasePath))
return QualType();
// Cast LHS to type of use.
@@ -5518,8 +5553,8 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
HaveConversion = false;
ToType = To->getType();
- InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(),
- SourceLocation());
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(To->getBeginLoc(), SourceLocation());
// C++11 5.16p3
// The process for determining whether an operand expression E1 of type T1
// can be converted to match an operand expression E2 of type T2 is defined
@@ -5664,8 +5699,8 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS
/// TryClassUnification.
static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
- InitializationKind Kind = InitializationKind::CreateCopy(E.get()->getLocStart(),
- SourceLocation());
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(E.get()->getBeginLoc(), SourceLocation());
Expr *Arg = E.get();
InitializationSequence InitSeq(Self, Entity, Kind, Arg);
ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg);
@@ -6515,6 +6550,11 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
ExpressionEvaluationContextRecord::EK_Decltype &&
"not in a decltype expression");
+ ExprResult Result = CheckPlaceholderExpr(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.get();
+
// C++11 [expr.call]p11:
// If a function call is a prvalue of object type,
// -- if the function call is either
@@ -6571,8 +6611,7 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
continue;
if (CheckCallReturnType(Call->getCallReturnType(Context),
- Call->getLocStart(),
- Call, Call->getDirectCallee()))
+ Call->getBeginLoc(), Call, Call->getDirectCallee()))
return ExprError();
}
@@ -6707,7 +6746,7 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
<< BaseType << Base->getSourceRange();
CallExpr *CE = dyn_cast<CallExpr>(Base);
if (Decl *CD = (CE ? CE->getCalleeDecl() : nullptr)) {
- Diag(CD->getLocStart(),
+ Diag(CD->getBeginLoc(),
diag::note_member_reference_arrow_from_operator_arrow);
}
}
@@ -7162,9 +7201,8 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
ExprValueKind VK = Expr::getValueKindForType(ResultType);
ResultType = ResultType.getNonLValueExprType(Context);
- CXXMemberCallExpr *CE =
- new (Context) CXXMemberCallExpr(Context, ME, None, ResultType, VK,
- Exp.get()->getLocEnd());
+ CXXMemberCallExpr *CE = CXXMemberCallExpr::Create(
+ Context, ME, /*Args=*/{}, ResultType, VK, Exp.get()->getEndLoc());
if (CheckFunctionCall(Method, CE,
Method->getType()->castAs<FunctionProtoType>()))
@@ -7752,41 +7790,24 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
bool DiscardedValue,
- bool IsConstexpr,
- bool IsLambdaInitCaptureInitializer) {
+ bool IsConstexpr) {
ExprResult FullExpr = FE;
if (!FullExpr.get())
return ExprError();
- // If we are an init-expression in a lambdas init-capture, we should not
- // diagnose an unexpanded pack now (will be diagnosed once lambda-expr
- // containing full-expression is done).
- // template<class ... Ts> void test(Ts ... t) {
- // test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now.
- // return a;
- // }() ...);
- // }
- // FIXME: This is a hack. It would be better if we pushed the lambda scope
- // when we parse the lambda introducer, and teach capturing (but not
- // unexpanded pack detection) to walk over LambdaScopeInfos which don't have a
- // corresponding class yet (that is, have LambdaScopeInfo either represent a
- // lambda where we've entered the introducer but not the body, or represent a
- // lambda where we've entered the body, depending on where the
- // parser/instantiation has got to).
- if (!IsLambdaInitCaptureInitializer &&
- DiagnoseUnexpandedParameterPack(FullExpr.get()))
+ if (DiagnoseUnexpandedParameterPack(FullExpr.get()))
return ExprError();
- // Top-level expressions default to 'id' when we're in a debugger.
- if (DiscardedValue && getLangOpts().DebuggerCastResultToId &&
- FullExpr.get()->getType() == Context.UnknownAnyTy) {
- FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType());
- if (FullExpr.isInvalid())
- return ExprError();
- }
-
if (DiscardedValue) {
+ // Top-level expressions default to 'id' when we're in a debugger.
+ if (getLangOpts().DebuggerCastResultToId &&
+ FullExpr.get()->getType() == Context.UnknownAnyTy) {
+ FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
FullExpr = CheckPlaceholderExpr(FullExpr.get());
if (FullExpr.isInvalid())
return ExprError();
@@ -7794,6 +7815,8 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
FullExpr = IgnoredValueConversions(FullExpr.get());
if (FullExpr.isInvalid())
return ExprError();
+
+ DiagnoseUnusedExprResult(FullExpr.get());
}
FullExpr = CorrectDelayedTyposInExpr(FullExpr.get());
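With the hunk above, the debugger 'id' cast, the placeholder check, the ignored-value conversions, and the new DiagnoseUnusedExprResult call all run only when DiscardedValue is set, so unused-result diagnostics are now emitted while finishing a discarded-value full-expression. A small example of the statements that path reports when compiled with the usual warnings enabled; nothing here is new source syntax, only the point at which the diagnostic is produced changes.

    int next(int x) { return x + 1; }

    void demo(int v) {
      next(v);        // result ignored, but a plain call: not reported
      v == 3;         // discarded comparison: reported as an unused result
      (void)(v == 3); // explicit discard: silences the diagnostic
    }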
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index e6d2b5068fd5..b2b21ba9eefa 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -496,7 +496,7 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
// allows this, while still reporting an error if T is a struct pointer.
if (!IsArrow) {
const PointerType *PT = BaseType->getAs<PointerType>();
- if (PT && (!getLangOpts().ObjC1 ||
+ if (PT && (!getLangOpts().ObjC ||
PT->getPointeeType()->isRecordType())) {
assert(BaseExpr && "cannot happen with implicit member accesses");
Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
@@ -1708,9 +1708,31 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
}
ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
- return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS,
- TemplateKWLoc, FirstQualifierInScope,
- NameInfo, TemplateArgs, S, &ExtraArgs);
+ ExprResult Res = BuildMemberReferenceExpr(
+ Base, Base->getType(), OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, NameInfo, TemplateArgs, S, &ExtraArgs);
+
+ if (!Res.isInvalid() && isa<MemberExpr>(Res.get()))
+ CheckMemberAccessOfNoDeref(cast<MemberExpr>(Res.get()));
+
+ return Res;
+}
+
+void Sema::CheckMemberAccessOfNoDeref(const MemberExpr *E) {
+ QualType ResultTy = E->getType();
+
+ // Do not warn on member accesses to arrays since this returns an array
+ // lvalue and does not actually dereference memory.
+ if (isa<ArrayType>(ResultTy))
+ return;
+
+ if (E->isArrow()) {
+ if (const auto *Ptr = dyn_cast<PointerType>(
+ E->getBase()->getType().getDesugaredType(Context))) {
+ if (Ptr->getPointeeType()->hasAttr(attr::NoDeref))
+ ExprEvalContexts.back().PossibleDerefs.insert(E);
+ }
+ }
}
ExprResult
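The added CheckMemberAccessOfNoDeref records '->' accesses whose base points at a type carrying the 'noderef' attribute, except when the member itself has array type, since that access only forms an array lvalue. A rough illustration of the code it targets, assuming the GNU __attribute__((noderef)) spelling on the pointee type used by Clang's noderef checking; Packet and peek are made-up names.

    struct Packet {
      int header;
      int payload[4];
    };

    void peek(struct Packet __attribute__((noderef)) *p, int *out) {
      (void)p->payload;  // array member: yields an array lvalue, not recorded
      *out = p->header;  // scalar load through a noderef base: recorded as a
                         // possible dereference and diagnosed
    }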
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index b291fc8691d5..ed780efd4cf3 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -50,8 +50,8 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
// ObjC strings can't be wide or UTF.
if (!S->isAscii()) {
- Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant)
- << S->getSourceRange();
+ Diag(S->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
+ << S->getSourceRange();
return true;
}
@@ -107,8 +107,8 @@ ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
} else {
// If there is no NSConstantString interface defined then treat this
// as error and recover from it.
- Diag(S->getLocStart(), diag::err_no_nsconstant_string_class) << NSIdent
- << S->getSourceRange();
+ Diag(S->getBeginLoc(), diag::err_no_nsconstant_string_class)
+ << NSIdent << S->getSourceRange();
Ty = Context.getObjCIdType();
}
} else {
@@ -399,9 +399,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
InitializedEntity Entity
= InitializedEntity::InitializeParameter(S.Context, T,
/*Consumed=*/false);
- InitializationKind Kind
- = InitializationKind::CreateCopy(Element->getLocStart(),
- SourceLocation());
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Element->getBeginLoc(), SourceLocation());
InitializationSequence Seq(S, Entity, Kind, Element);
if (!Seq.Failed())
return Seq.Perform(S, Entity, Kind, Element);
@@ -432,12 +431,12 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2
: 3;
- S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
- << Which << OrigElement->getSourceRange()
- << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+ S.Diag(OrigElement->getBeginLoc(), diag::err_box_literal_collection)
+ << Which << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
- Result = S.BuildObjCNumericLiteral(OrigElement->getLocStart(),
- OrigElement);
+ Result =
+ S.BuildObjCNumericLiteral(OrigElement->getBeginLoc(), OrigElement);
if (Result.isInvalid())
return ExprError();
@@ -448,11 +447,11 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
// If this is potentially an Objective-C string literal, add the '@'.
else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
if (String->isAscii()) {
- S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
- << 0 << OrigElement->getSourceRange()
- << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+ S.Diag(OrigElement->getBeginLoc(), diag::err_box_literal_collection)
+ << 0 << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
- Result = S.BuildObjCStringLiteral(OrigElement->getLocStart(), String);
+ Result = S.BuildObjCStringLiteral(OrigElement->getBeginLoc(), String);
if (Result.isInvalid())
return ExprError();
@@ -462,8 +461,8 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
}
if (!Recovered) {
- S.Diag(Element->getLocStart(), diag::err_invalid_collection_element)
- << Element->getType();
+ S.Diag(Element->getBeginLoc(), diag::err_invalid_collection_element)
+ << Element->getType();
return ExprError();
}
}
@@ -481,9 +480,9 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
break;
}
if (!hasMacro)
- S.Diag(Element->getLocStart(),
+ S.Diag(Element->getBeginLoc(),
diag::warn_concatenated_nsarray_literal)
- << Element->getType();
+ << Element->getType();
}
}
}
@@ -491,9 +490,9 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
// Make sure that the element has the type that the container factory
// function expects.
return S.PerformCopyInitialization(
- InitializedEntity::InitializeParameter(S.Context, T,
- /*Consumed=*/false),
- Element->getLocStart(), Element);
+ InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false),
+ Element->getBeginLoc(), Element);
}
ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
@@ -1034,8 +1033,8 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
!Element.Value->containsUnexpandedParameterPack()) {
Diag(Element.EllipsisLoc,
diag::err_pack_expansion_without_parameter_packs)
- << SourceRange(Element.Key->getLocStart(),
- Element.Value->getLocEnd());
+ << SourceRange(Element.Key->getBeginLoc(),
+ Element.Value->getEndLoc());
return ExprError();
}
@@ -1228,8 +1227,12 @@ ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
return true;
}
- if (PDecl->hasDefinition())
+ if (!PDecl->hasDefinition()) {
+ Diag(ProtoLoc, diag::err_atprotocol_protocol) << PDecl;
+ Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl;
+ } else {
PDecl = PDecl->getDefinition();
+ }
QualType Ty = Context.getObjCProtoType();
if (Ty.isNull())
@@ -1343,7 +1346,8 @@ static QualType getBaseMessageSendResultType(Sema &S,
return transferNullability(ReceiverType);
}
-QualType Sema::getMessageSendResultType(QualType ReceiverType,
+QualType Sema::getMessageSendResultType(const Expr *Receiver,
+ QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage,
bool isSuperMessage) {
@@ -1354,8 +1358,33 @@ QualType Sema::getMessageSendResultType(QualType ReceiverType,
isSuperMessage);
// If this is a class message, ignore the nullability of the receiver.
- if (isClassMessage)
+ if (isClassMessage) {
+ // In a class method, class messages to 'self' that return instancetype can
+ // be typed as the current class. We can safely do this in ARC because self
+ // can't be reassigned, and we do it unsafely outside of ARC because in
+ // practice people never reassign self in class methods and there's some
+ // virtue in not being aggressively pedantic.
+ if (Receiver && Receiver->isObjCSelfExpr()) {
+ assert(ReceiverType->isObjCClassType() && "expected a Class self");
+ QualType T = Method->getSendResultType(ReceiverType);
+ AttributedType::stripOuterNullability(T);
+ if (T == Context.getObjCInstanceType()) {
+ const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(
+ cast<ImplicitParamDecl>(
+ cast<DeclRefExpr>(Receiver->IgnoreParenImpCasts())->getDecl())
+ ->getDeclContext());
+ assert(MD->isClassMethod() && "expected a class method");
+ QualType NewResultType = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(MD->getClassInterface()));
+ if (auto Nullability = resultType->getNullability(Context))
+ NewResultType = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*Nullability),
+ NewResultType, NewResultType);
+ return NewResultType;
+ }
+ }
return resultType;
+ }
// There is nothing left to do if the result type cannot have a nullability
// specifier.
@@ -1502,15 +1531,12 @@ void Sema::EmitRelatedResultTypeNote(const Expr *E) {
<< MsgSend->getType();
}
-bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
- MultiExprArg Args,
- Selector Sel,
- ArrayRef<SourceLocation> SelectorLocs,
- ObjCMethodDecl *Method,
- bool isClassMessage, bool isSuperMessage,
- SourceLocation lbrac, SourceLocation rbrac,
- SourceRange RecRange,
- QualType &ReturnType, ExprValueKind &VK) {
+bool Sema::CheckMessageArgumentTypes(
+ const Expr *Receiver, QualType ReceiverType, MultiExprArg Args,
+ Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method,
+ bool isClassMessage, bool isSuperMessage, SourceLocation lbrac,
+ SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType,
+ ExprValueKind &VK) {
SourceLocation SelLoc;
if (!SelectorLocs.empty() && SelectorLocs.front().isValid())
SelLoc = SelectorLocs.front();
@@ -1587,8 +1613,8 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
return false;
}
- ReturnType = getMessageSendResultType(ReceiverType, Method, isClassMessage,
- isSuperMessage);
+ ReturnType = getMessageSendResultType(Receiver, ReceiverType, Method,
+ isClassMessage, isSuperMessage);
VK = Expr::getValueKindForType(Method->getReturnType());
unsigned NumNamedArgs = Sel.getNumArgs();
@@ -1693,12 +1719,12 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
} else {
// Check for extra arguments to non-variadic methods.
if (Args.size() != NumNamedArgs) {
- Diag(Args[NumNamedArgs]->getLocStart(),
+ Diag(Args[NumNamedArgs]->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
- << 2 /*method*/ << NumNamedArgs << static_cast<unsigned>(Args.size())
- << Method->getSourceRange()
- << SourceRange(Args[NumNamedArgs]->getLocStart(),
- Args.back()->getLocEnd());
+ << 2 /*method*/ << NumNamedArgs << static_cast<unsigned>(Args.size())
+ << Method->getSourceRange()
+ << SourceRange(Args[NumNamedArgs]->getBeginLoc(),
+ Args.back()->getEndLoc());
}
}
@@ -2323,7 +2349,7 @@ static void checkFoundationAPI(Sema &S, SourceLocation Loc,
<< (!Ret->isRecordType()
? /*Vector*/ 2
: Ret->isUnionType() ? /*Union*/ 1 : /*Struct*/ 0);
- S.Diag(ImpliedMethod->getLocStart(),
+ S.Diag(ImpliedMethod->getBeginLoc(),
diag::note_objc_unsafe_perform_selector_method_declared_here)
<< ImpliedMethod->getSelector() << Ret;
}
@@ -2468,7 +2494,8 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
if (!Method)
Method = Class->lookupPrivateClassMethod(Sel);
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs,
+ nullptr, false, false, Class))
return ExprError();
}
@@ -2478,12 +2505,10 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
unsigned NumArgs = ArgsIn.size();
Expr **Args = ArgsIn.data();
- if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
- Sel, SelectorLocs,
- Method, true,
- SuperLoc.isValid(), LBracLoc, RBracLoc,
- SourceRange(),
- ReturnType, VK))
+ if (CheckMessageArgumentTypes(/*Receiver=*/nullptr, ReceiverType,
+ MultiExprArg(Args, NumArgs), Sel, SelectorLocs,
+ Method, true, SuperLoc.isValid(), LBracLoc,
+ RBracLoc, SourceRange(), ReturnType, VK))
return ExprError();
if (Method && !Method->getReturnType()->isVoidType() &&
@@ -2582,7 +2607,7 @@ static bool isMethodDeclaredInRootProtocol(Sema &S, const ObjCMethodDecl *M) {
return false;
const IdentifierInfo *II = S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
if (const auto *RootClass = dyn_cast_or_null<ObjCInterfaceDecl>(
- S.LookupSingleName(S.TUScope, II, Protocol->getLocStart(),
+ S.LookupSingleName(S.TUScope, II, Protocol->getBeginLoc(),
Sema::LookupOrdinaryName))) {
for (const ObjCProtocolDecl *P : RootClass->all_referenced_protocols()) {
if (P->getCanonicalDecl() == Protocol->getCanonicalDecl())
@@ -2635,7 +2660,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
"use it instead.");
// The location of the receiver.
- SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
+ SourceLocation Loc = SuperLoc.isValid() ? SuperLoc : Receiver->getBeginLoc();
SourceRange RecRange =
SuperLoc.isValid()? SuperLoc : Receiver->getSourceRange();
ArrayRef<SourceLocation> SelectorSlotLocs;
@@ -2781,14 +2806,19 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
} else {
if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
+ // FIXME: Is this correct? Why are we assuming that a message to
+ // Class will call a method in the current interface?
+
// First check the public methods in the class interface.
Method = ClassDecl->lookupClassMethod(Sel);
if (!Method)
Method = ClassDecl->lookupPrivateClassMethod(Sel);
+
+ if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs, nullptr,
+ false, false, ClassDecl))
+ return ExprError();
}
- if (Method && DiagnoseUseOfDecl(Method, SelectorSlotLocs))
- return ExprError();
}
if (!Method) {
// If not messaging 'self', look for any factory method named 'Sel'.
@@ -2856,8 +2886,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
return ExprError();
forwardClass = OCIType->getInterfaceDecl();
- Diag(Receiver ? Receiver->getLocStart()
- : SuperLoc, diag::note_receiver_is_id);
+ Diag(Receiver ? Receiver->getBeginLoc() : SuperLoc,
+ diag::note_receiver_is_id);
Method = nullptr;
} else {
Method = ClassDecl->lookupInstanceMethod(Sel);
@@ -2973,9 +3003,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
ExprValueKind VK = VK_RValue;
bool ClassMessage = (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType());
- if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs),
- Sel, SelectorLocs, Method,
- ClassMessage, SuperLoc.isValid(),
+ if (CheckMessageArgumentTypes(Receiver, ReceiverType,
+ MultiExprArg(Args, NumArgs), Sel, SelectorLocs,
+ Method, ClassMessage, SuperLoc.isValid(),
LBracLoc, RBracLoc, RecRange, ReturnType, VK))
return ExprError();
@@ -3131,7 +3161,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
if (!IsWeak && Sel.isUnarySelector())
IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
- if (IsWeak &&
+ if (IsWeak && !isUnevaluatedContext() &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, LBracLoc))
getCurFunction()->recordUseOfWeak(Result, Prop);
}
@@ -3776,8 +3806,8 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
(CastClass && CastClass->isSuperClassOf(ExprClass)))
return true;
if (warn)
- S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
- << T << Target->getName() << castType->getPointeeType();
+ S.Diag(castExpr->getBeginLoc(), diag::warn_objc_invalid_bridge)
+ << T << Target->getName() << castType->getPointeeType();
return false;
} else if (castType->isObjCIdType() ||
(S.Context.ObjCObjectAdoptsQTypeProtocols(
@@ -3788,20 +3818,21 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
return true;
else {
if (warn) {
- S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
- << T << Target->getName() << castType;
- S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
- S.Diag(Target->getLocStart(), diag::note_declared_at);
+ S.Diag(castExpr->getBeginLoc(), diag::warn_objc_invalid_bridge)
+ << T << Target->getName() << castType;
+ S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
+ S.Diag(Target->getBeginLoc(), diag::note_declared_at);
}
return false;
}
}
} else if (!castType->isObjCIdType()) {
- S.Diag(castExpr->getLocStart(), diag::err_objc_cf_bridged_not_interface)
- << castExpr->getType() << Parm;
- S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ S.Diag(castExpr->getBeginLoc(),
+ diag::err_objc_cf_bridged_not_interface)
+ << castExpr->getType() << Parm;
+ S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
if (Target)
- S.Diag(Target->getLocStart(), diag::note_declared_at);
+ S.Diag(Target->getBeginLoc(), diag::note_declared_at);
}
return true;
}
@@ -3841,9 +3872,10 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
(ExprClass && CastClass->isSuperClassOf(ExprClass)))
return true;
if (warn) {
- S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
- << castExpr->getType()->getPointeeType() << T;
- S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ S.Diag(castExpr->getBeginLoc(),
+ diag::warn_objc_invalid_bridge_to_cf)
+ << castExpr->getType()->getPointeeType() << T;
+ S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
}
return false;
} else if (castExpr->getType()->isObjCIdType() ||
@@ -3855,20 +3887,22 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
return true;
else {
if (warn) {
- S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge_to_cf)
- << castExpr->getType() << castType;
- S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
- S.Diag(Target->getLocStart(), diag::note_declared_at);
+ S.Diag(castExpr->getBeginLoc(),
+ diag::warn_objc_invalid_bridge_to_cf)
+ << castExpr->getType() << castType;
+ S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
+ S.Diag(Target->getBeginLoc(), diag::note_declared_at);
}
return false;
}
}
}
- S.Diag(castExpr->getLocStart(), diag::err_objc_ns_bridged_invalid_cfobject)
- << castExpr->getType() << castType;
- S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ S.Diag(castExpr->getBeginLoc(),
+ diag::err_objc_ns_bridged_invalid_cfobject)
+ << castExpr->getType() << castType;
+ S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
if (Target)
- S.Diag(Target->getLocStart(), diag::note_declared_at);
+ S.Diag(Target->getBeginLoc(), diag::note_declared_at);
return true;
}
return false;
@@ -3879,7 +3913,7 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
}
void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return;
// warn in presence of __bridge casting to or from a toll free bridge cast.
ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType());
@@ -3945,13 +3979,13 @@ void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) {
ARCConversionTypeClass castExprACTC = classifyTypeForARCConversion(castType);
if (srcExprACTC != ACTC_retainable || castExprACTC != ACTC_coreFoundation)
return;
- CheckObjCBridgeRelatedConversions(castExpr->getLocStart(),
- castType, SrcType, castExpr);
+ CheckObjCBridgeRelatedConversions(castExpr->getBeginLoc(), castType, SrcType,
+ castExpr);
}
bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
ARCConversionTypeClass exprACTC =
classifyTypeForARCConversion(castExpr->getType());
@@ -3991,7 +4025,7 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId
<< SrcType << DestType;
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
}
return false;
}
@@ -4002,9 +4036,9 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId
<< SrcType << DestType;
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
if (Target)
- Diag(Target->getLocStart(), diag::note_declared_at);
+ Diag(Target->getBeginLoc(), diag::note_declared_at);
}
return false;
}
@@ -4017,7 +4051,7 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << false;
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
}
return false;
}
@@ -4031,7 +4065,7 @@ bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc,
if (Diagnose) {
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << Sel << true;
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
}
return false;
}
@@ -4067,14 +4101,16 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
ExpressionString += RelatedClass->getNameAsString();
ExpressionString += " ";
ExpressionString += ClassMethod->getSelector().getAsString();
- SourceLocation SrcExprEndLoc = getLocForEndOfToken(SrcExpr->getLocEnd());
+ SourceLocation SrcExprEndLoc =
+ getLocForEndOfToken(SrcExpr->getEndLoc());
// Provide a fixit: [RelatedClass ClassMethod SrcExpr]
Diag(Loc, diag::err_objc_bridged_related_known_method)
- << SrcType << DestType << ClassMethod->getSelector() << false
- << FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString)
- << FixItHint::CreateInsertion(SrcExprEndLoc, "]");
- Diag(RelatedClass->getLocStart(), diag::note_declared_at);
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ << SrcType << DestType << ClassMethod->getSelector() << false
+ << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(),
+ ExpressionString)
+ << FixItHint::CreateInsertion(SrcExprEndLoc, "]");
+ Diag(RelatedClass->getBeginLoc(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
QualType receiverType = Context.getObjCInterfaceType(RelatedClass);
// Argument.
@@ -4094,7 +4130,7 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
if (Diagnose) {
std::string ExpressionString;
SourceLocation SrcExprEndLoc =
- getLocForEndOfToken(SrcExpr->getLocEnd());
+ getLocForEndOfToken(SrcExpr->getEndLoc());
if (InstanceMethod->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl =
InstanceMethod->findPropertyDecl()) {
@@ -4113,11 +4149,11 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
Diag(Loc, diag::err_objc_bridged_related_known_method)
<< SrcType << DestType << InstanceMethod->getSelector() << true
- << FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[")
+ << FixItHint::CreateInsertion(SrcExpr->getBeginLoc(), "[")
<< FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString);
}
- Diag(RelatedClass->getLocStart(), diag::note_declared_at);
- Diag(TDNDecl->getLocStart(), diag::note_declared_at);
+ Diag(RelatedClass->getBeginLoc(), diag::note_declared_at);
+ Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
ExprResult msg =
BuildInstanceMessageImplicit(SrcExpr, SrcType,
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index f006a677b678..10c0c6bf33b3 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -194,15 +194,15 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
// [dcl.init.string]p2
if (StrLength > CAT->getSize().getZExtValue())
- S.Diag(Str->getLocStart(),
+ S.Diag(Str->getBeginLoc(),
diag::err_initializer_string_for_char_array_too_long)
- << Str->getSourceRange();
+ << Str->getSourceRange();
} else {
// C99 6.7.8p14.
if (StrLength-1 > CAT->getSize().getZExtValue())
- S.Diag(Str->getLocStart(),
+ S.Diag(Str->getBeginLoc(),
diag::ext_initializer_string_for_char_array_too_long)
- << Str->getSourceRange();
+ << Str->getSourceRange();
}
// Set the type to the actual size that we are initializing. If we have
@@ -518,10 +518,10 @@ void InitListChecker::FillInEmptyInitForBase(
if (!ILE->getInit(Init)) {
ExprResult BaseInit =
- FillWithNoInit ? new (SemaRef.Context) NoInitExpr(Base.getType())
- : PerformEmptyInit(SemaRef, ILE->getLocEnd(), BaseEntity,
- /*VerifyOnly*/ false,
- TreatUnavailableAsInvalid);
+ FillWithNoInit
+ ? new (SemaRef.Context) NoInitExpr(Base.getType())
+ : PerformEmptyInit(SemaRef, ILE->getEndLoc(), BaseEntity,
+ /*VerifyOnly*/ false, TreatUnavailableAsInvalid);
if (BaseInit.isInvalid()) {
hadError = true;
return;
@@ -545,7 +545,7 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
InitListExpr *ILE,
bool &RequiresSecondPass,
bool FillWithNoInit) {
- SourceLocation Loc = ILE->getLocEnd();
+ SourceLocation Loc = ILE->getEndLoc();
unsigned NumInits = ILE->getNumInits();
InitializedEntity MemberEntity
= InitializedEntity::InitializeMember(Field, &ParentEntity);
@@ -765,10 +765,9 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
if (FillWithNoInit)
Filler = new (SemaRef.Context) NoInitExpr(ElementType);
else {
- ExprResult ElementInit = PerformEmptyInit(SemaRef, ILE->getLocEnd(),
- ElementEntity,
- /*VerifyOnly*/false,
- TreatUnavailableAsInvalid);
+ ExprResult ElementInit =
+ PerformEmptyInit(SemaRef, ILE->getEndLoc(), ElementEntity,
+ /*VerifyOnly*/ false, TreatUnavailableAsInvalid);
if (ElementInit.isInvalid()) {
hadError = true;
return;
@@ -917,7 +916,7 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
if (maxElements == 0) {
if (!VerifyOnly)
- SemaRef.Diag(ParentIList->getInit(Index)->getLocStart(),
+ SemaRef.Diag(ParentIList->getInit(Index)->getBeginLoc(),
diag::err_implicit_empty_initializer);
++Index;
hadError = true;
@@ -925,11 +924,10 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
}
// Build a structured initializer list corresponding to this subobject.
- InitListExpr *StructuredSubobjectInitList
- = getStructuredSubobjectInit(ParentIList, Index, T, StructuredList,
- StructuredIndex,
- SourceRange(ParentIList->getInit(Index)->getLocStart(),
- ParentIList->getSourceRange().getEnd()));
+ InitListExpr *StructuredSubobjectInitList = getStructuredSubobjectInit(
+ ParentIList, Index, T, StructuredList, StructuredIndex,
+ SourceRange(ParentIList->getInit(Index)->getBeginLoc(),
+ ParentIList->getSourceRange().getEnd()));
unsigned StructuredSubobjectInitIndex = 0;
// Check the element types and build the structural subobject.
@@ -956,16 +954,24 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
if ((T->isArrayType() || T->isRecordType()) &&
!ParentIList->isIdiomaticZeroInitializer(SemaRef.getLangOpts()) &&
!isIdiomaticBraceElisionEntity(Entity)) {
- SemaRef.Diag(StructuredSubobjectInitList->getLocStart(),
+ SemaRef.Diag(StructuredSubobjectInitList->getBeginLoc(),
diag::warn_missing_braces)
<< StructuredSubobjectInitList->getSourceRange()
<< FixItHint::CreateInsertion(
- StructuredSubobjectInitList->getLocStart(), "{")
+ StructuredSubobjectInitList->getBeginLoc(), "{")
<< FixItHint::CreateInsertion(
SemaRef.getLocForEndOfToken(
- StructuredSubobjectInitList->getLocEnd()),
+ StructuredSubobjectInitList->getEndLoc()),
"}");
}
+
+ // Warn if this type won't be an aggregate in future versions of C++.
+ auto *CXXRD = T->getAsCXXRecordDecl();
+ if (CXXRD && CXXRD->hasUserDeclaredConstructor()) {
+ SemaRef.Diag(StructuredSubobjectInitList->getBeginLoc(),
+ diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ << StructuredSubobjectInitList->getSourceRange() << T;
+ }
}
}
@@ -1080,8 +1086,8 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
hadError = true;
}
// Special-case
- SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
- << IList->getInit(Index)->getSourceRange();
+ SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
+ << IList->getInit(Index)->getSourceRange();
} else if (!T->isIncompleteType()) {
// Don't complain for incomplete types, since we'll get an error
// elsewhere
@@ -1103,14 +1109,35 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
hadError = true;
}
- SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
- << initKind << IList->getInit(Index)->getSourceRange();
+ SemaRef.Diag(IList->getInit(Index)->getBeginLoc(), DK)
+ << initKind << IList->getInit(Index)->getSourceRange();
}
}
- if (!VerifyOnly && T->isScalarType() &&
- IList->getNumInits() == 1 && !isa<InitListExpr>(IList->getInit(0)))
- warnBracedScalarInit(SemaRef, Entity, IList->getSourceRange());
+ if (!VerifyOnly) {
+ if (T->isScalarType() && IList->getNumInits() == 1 &&
+ !isa<InitListExpr>(IList->getInit(0)))
+ warnBracedScalarInit(SemaRef, Entity, IList->getSourceRange());
+
+ // Warn if this is a class type that won't be an aggregate in future
+ // versions of C++.
+ auto *CXXRD = T->getAsCXXRecordDecl();
+ if (CXXRD && CXXRD->hasUserDeclaredConstructor()) {
+ // Don't warn if there's an equivalent default constructor that would be
+ // used instead.
+ bool HasEquivCtor = false;
+ if (IList->getNumInits() == 0) {
+ auto *CD = SemaRef.LookupDefaultConstructor(CXXRD);
+ HasEquivCtor = CD && !CD->isDeleted();
+ }
+
+ if (!HasEquivCtor) {
+ SemaRef.Diag(IList->getBeginLoc(),
+ diag::warn_cxx2a_compat_aggregate_init_with_ctors)
+ << IList->getSourceRange() << T;
+ }
+ }
+ }
}
void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
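Both new warn_cxx2a_compat_aggregate_init_with_ctors warnings (the implicit-sublist case and the explicit-list case above, which additionally skips empty lists that would pick a usable default constructor) flag brace initialization of class types that stop being aggregates once any user-declared constructor disqualifies them. A short example of the pattern they target; Handle is an invented name.

    struct Handle {
      Handle() = delete;  // user-declared, though not user-provided
      int fd;
    };

    Handle h{3};  // aggregate initialization through C++17; ill-formed under
                  // the C++2a rules, hence the compatibility warning
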
@@ -1155,21 +1182,24 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
// This type is invalid, issue a diagnostic.
++Index;
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
- << DeclType;
+ SemaRef.Diag(IList->getBeginLoc(), diag::err_illegal_initializer_type)
+ << DeclType;
hadError = true;
} else if (DeclType->isReferenceType()) {
CheckReferenceType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
} else if (DeclType->isObjCObjectType()) {
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_init_objc_class)
- << DeclType;
+ SemaRef.Diag(IList->getBeginLoc(), diag::err_init_objc_class) << DeclType;
hadError = true;
+ } else if (DeclType->isOCLIntelSubgroupAVCType()) {
+ // Checks for scalar type are sufficient for these types too.
+ CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
} else {
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
- << DeclType;
+ SemaRef.Diag(IList->getBeginLoc(), diag::err_illegal_initializer_type)
+ << DeclType;
hadError = true;
}
}
@@ -1232,7 +1262,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// FIXME: Better EqualLoc?
InitializationKind Kind =
- InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation());
+ InitializationKind::CreateCopy(expr->getBeginLoc(), SourceLocation());
InitializationSequence Seq(SemaRef, Entity, Kind, expr,
/*TopLevelOfInitList*/ true);
@@ -1356,8 +1386,8 @@ void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
// This is an extension in C. (The builtin _Complex type does not exist
// in the C++ standard.)
if (!SemaRef.getLangOpts().CPlusPlus && !VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::ext_complex_component_init)
- << IList->getSourceRange();
+ SemaRef.Diag(IList->getBeginLoc(), diag::ext_complex_component_init)
+ << IList->getSourceRange();
// Initialize the complex number.
QualType elementType = DeclType->getAs<ComplexType>()->getElementType();
@@ -1378,11 +1408,11 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(),
- SemaRef.getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_empty_scalar_initializer :
- diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_scalar_initializer
+ : diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
hadError = !SemaRef.getLangOpts().CPlusPlus11;
++Index;
++StructuredIndex;
@@ -1394,18 +1424,17 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
// FIXME: This is invalid, and accepting it causes overload resolution
// to pick the wrong overload in some corner cases.
if (!VerifyOnly)
- SemaRef.Diag(SubIList->getLocStart(),
+ SemaRef.Diag(SubIList->getBeginLoc(),
diag::ext_many_braces_around_scalar_init)
- << SubIList->getSourceRange();
+ << SubIList->getSourceRange();
CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
StructuredIndex);
return;
} else if (isa<DesignatedInitExpr>(expr)) {
if (!VerifyOnly)
- SemaRef.Diag(expr->getLocStart(),
- diag::err_designator_for_scalar_init)
- << DeclType << expr->getSourceRange();
+ SemaRef.Diag(expr->getBeginLoc(), diag::err_designator_for_scalar_init)
+ << DeclType << expr->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1420,8 +1449,8 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
}
ExprResult Result =
- SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr,
- /*TopLevelOfInitList=*/true);
+ SemaRef.PerformCopyInitialization(Entity, expr->getBeginLoc(), expr,
+ /*TopLevelOfInitList=*/true);
Expr *ResultExpr = nullptr;
@@ -1453,10 +1482,9 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
// so that we know the location (or decl) of the "current object" being
// initialized.
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(),
- diag::err_init_reference_member_uninitialized)
- << DeclType
- << IList->getSourceRange();
+ SemaRef.Diag(IList->getBeginLoc(),
+ diag::err_init_reference_member_uninitialized)
+ << DeclType << IList->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1466,8 +1494,8 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
Expr *expr = IList->getInit(Index);
if (isa<InitListExpr>(expr) && !SemaRef.getLangOpts().CPlusPlus11) {
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
- << DeclType << IList->getSourceRange();
+ SemaRef.Diag(IList->getBeginLoc(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1482,7 +1510,7 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
}
ExprResult Result =
- SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr,
+ SemaRef.PerformCopyInitialization(Entity, expr->getBeginLoc(), expr,
/*TopLevelOfInitList=*/true);
if (Result.isInvalid())
@@ -1513,7 +1541,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
if (VerifyOnly)
CheckEmptyInitializable(
InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity),
- IList->getLocEnd());
+ IList->getEndLoc());
return;
}
@@ -1529,9 +1557,9 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
return;
}
- ExprResult Result =
- SemaRef.PerformCopyInitialization(Entity, Init->getLocStart(), Init,
- /*TopLevelOfInitList=*/true);
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, Init->getBeginLoc(), Init,
+ /*TopLevelOfInitList=*/true);
Expr *ResultExpr = nullptr;
if (Result.isInvalid())
@@ -1560,7 +1588,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
// Don't attempt to go past the end of the init list
if (Index >= IList->getNumInits()) {
if (VerifyOnly)
- CheckEmptyInitializable(ElementEntity, IList->getLocEnd());
+ CheckEmptyInitializable(ElementEntity, IList->getEndLoc());
break;
}
@@ -1586,7 +1614,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
//
// Because of this, explicitly call out that it is non-portable.
//
- SemaRef.Diag(IList->getLocStart(),
+ SemaRef.Diag(IList->getBeginLoc(),
diag::warn_neon_vector_initializer_non_portable);
const char *typeCode;
@@ -1601,11 +1629,11 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
else
llvm_unreachable("Invalid element type!");
- SemaRef.Diag(IList->getLocStart(),
- SemaRef.Context.getTypeSize(VT) > 64 ?
- diag::note_neon_vector_initializer_non_portable_q :
- diag::note_neon_vector_initializer_non_portable)
- << typeCode << typeSize;
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.Context.getTypeSize(VT) > 64
+ ? diag::note_neon_vector_initializer_non_portable_q
+ : diag::note_neon_vector_initializer_non_portable)
+ << typeCode << typeSize;
}
return;
@@ -1646,9 +1674,9 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
// OpenCL requires all elements to be initialized.
if (numEltsInit != maxElements) {
if (!VerifyOnly)
- SemaRef.Diag(IList->getLocStart(),
+ SemaRef.Diag(IList->getBeginLoc(),
diag::err_vector_incorrect_num_initializers)
- << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ << (numEltsInit < maxElements) << maxElements << numEltsInit;
hadError = true;
}
}
@@ -1686,9 +1714,9 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
// earlier, but I don't know where clang accepts VLAs (gcc accepts
// them in all sorts of strange places).
if (!VerifyOnly)
- SemaRef.Diag(VAT->getSizeExpr()->getLocStart(),
- diag::err_variable_object_no_init)
- << VAT->getSizeExpr()->getSourceRange();
+ SemaRef.Diag(VAT->getSizeExpr()->getBeginLoc(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
hadError = true;
++Index;
++StructuredIndex;
@@ -1765,8 +1793,7 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
if (maxElements == Zero && !Entity.isVariableLengthArrayNew()) {
// Sizing an array implicitly to zero is not allowed by ISO C,
// but is supported by GNU.
- SemaRef.Diag(IList->getLocStart(),
- diag::ext_typecheck_zero_array_size);
+ SemaRef.Diag(IList->getBeginLoc(), diag::ext_typecheck_zero_array_size);
}
DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements,
@@ -1780,9 +1807,9 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
// FIXME: This needs to detect holes left by designated initializers too.
if ((maxElementsKnown && elementIndex < maxElements) ||
Entity.isVariableLengthArrayNew())
- CheckEmptyInitializable(InitializedEntity::InitializeElement(
- SemaRef.Context, 0, Entity),
- IList->getLocEnd());
+ CheckEmptyInitializable(
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity),
+ IList->getEndLoc());
}
}
@@ -1815,9 +1842,8 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
}
if (!VerifyOnly) {
- SemaRef.Diag(InitExpr->getLocStart(),
- FlexArrayDiag)
- << InitExpr->getLocStart();
+ SemaRef.Diag(InitExpr->getBeginLoc(), FlexArrayDiag)
+ << InitExpr->getBeginLoc();
SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
<< Field;
}
@@ -1825,6 +1851,30 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
return FlexArrayDiag != diag::ext_flexible_array_init;
}
+/// Check if the type of a class element has an accessible destructor.
+///
+/// Aggregate initialization requires a class element's destructor be
+/// accessible per 11.6.1 [dcl.init.aggr]:
+///
+/// The destructor for each element of class type is potentially invoked
+/// (15.4 [class.dtor]) from the context where the aggregate initialization
+/// occurs.
+static bool hasAccessibleDestructor(QualType ElementType, SourceLocation Loc,
+ Sema &SemaRef) {
+ auto *CXXRD = ElementType->getAsCXXRecordDecl();
+ if (!CXXRD)
+ return false;
+
+ CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(CXXRD);
+ SemaRef.CheckDestructorAccess(Loc, Destructor,
+ SemaRef.PDiag(diag::err_access_dtor_temp)
+ << ElementType);
+ SemaRef.MarkFunctionReferenced(Loc, Destructor);
+ if (SemaRef.DiagnoseUseOfDecl(Destructor, Loc))
+ return true;
+ return false;
+}
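hasAccessibleDestructor is called below for union members, base classes, designated and positional fields, and any trailing uninitialized fields, so aggregate initialization now verifies that every class-type element can be destroyed from the context where the initialization occurs, per the [dcl.init.aggr] wording quoted above. A hedged sketch of code this newly rejects, assuming the err_access_dtor_temp-style access diagnostic referenced in the helper; Guarded and Agg are invented names.

    class Guarded {
    public:
      Guarded() = default;
    private:
      ~Guarded() = default;  // not accessible from namespace scope
    };

    struct Agg {
      Guarded g;
      int tail;
    };

    Agg *make() {
      return new Agg{};  // the element destructor ~Guarded is potentially
                         // invoked from here, so its access is now checked
    }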
+
void InitListChecker::CheckStructUnionTypes(
const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType,
CXXRecordDecl::base_class_range Bases, RecordDecl::field_iterator Field,
@@ -1845,6 +1895,15 @@ void InitListChecker::CheckStructUnionTypes(
if (DeclType->isUnionType() && IList->getNumInits() == 0) {
RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ if (!VerifyOnly)
+ for (FieldDecl *FD : RD->fields()) {
+ QualType ET = SemaRef.Context.getBaseElementType(FD->getType());
+ if (hasAccessibleDestructor(ET, IList->getEndLoc(), SemaRef)) {
+ hadError = true;
+ return;
+ }
+ }
+
// If there's a default initializer, use it.
if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->hasInClassInitializer()) {
if (VerifyOnly)
@@ -1867,7 +1926,7 @@ void InitListChecker::CheckStructUnionTypes(
if (VerifyOnly)
CheckEmptyInitializable(
InitializedEntity::InitializeMember(*Field, &Entity),
- IList->getLocEnd());
+ IList->getEndLoc());
else
StructuredList->setInitializedFieldInUnion(*Field);
break;
@@ -1881,13 +1940,13 @@ void InitListChecker::CheckStructUnionTypes(
// If we have any base classes, they are initialized prior to the fields.
for (auto &Base : Bases) {
Expr *Init = Index < IList->getNumInits() ? IList->getInit(Index) : nullptr;
- SourceLocation InitLoc = Init ? Init->getLocStart() : IList->getLocEnd();
// Designated inits always initialize fields, so if we see one, all
// remaining base classes have no explicit initializer.
if (Init && isa<DesignatedInitExpr>(Init))
Init = nullptr;
+ SourceLocation InitLoc = Init ? Init->getBeginLoc() : IList->getEndLoc();
InitializedEntity BaseEntity = InitializedEntity::InitializeBase(
SemaRef.Context, &Base, false, &Entity);
if (Init) {
@@ -1897,6 +1956,12 @@ void InitListChecker::CheckStructUnionTypes(
} else if (VerifyOnly) {
CheckEmptyInitializable(BaseEntity, InitLoc);
}
+
+ if (!VerifyOnly)
+ if (hasAccessibleDestructor(Base.getType(), InitLoc, SemaRef)) {
+ hadError = true;
+ return;
+ }
}
// If structDecl is a forward declaration, this loop won't do
@@ -1907,9 +1972,11 @@ void InitListChecker::CheckStructUnionTypes(
RecordDecl::field_iterator FieldEnd = RD->field_end();
bool CheckForMissingFields =
!IList->isIdiomaticZeroInitializer(SemaRef.getLangOpts());
+ bool HasDesignatedInit = false;
while (Index < IList->getNumInits()) {
Expr *Init = IList->getInit(Index);
+ SourceLocation InitLoc = Init->getBeginLoc();
if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
// If we're not the subobject that matches up with the '{' for
@@ -1918,6 +1985,8 @@ void InitListChecker::CheckStructUnionTypes(
if (!SubobjectIsDesignatorContext)
return;
+ HasDesignatedInit = true;
+
// Handle this designated initializer. Field will be updated to
// the next field that we'll be initializing.
if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
@@ -1925,6 +1994,17 @@ void InitListChecker::CheckStructUnionTypes(
StructuredList, StructuredIndex,
true, TopLevelObject))
hadError = true;
+ else if (!VerifyOnly) {
+ // Find the field named by the designated initializer.
+ RecordDecl::field_iterator F = RD->field_begin();
+ while (std::next(F) != Field)
+ ++F;
+ QualType ET = SemaRef.Context.getBaseElementType(F->getType());
+ if (hasAccessibleDestructor(ET, InitLoc, SemaRef)) {
+ hadError = true;
+ return;
+ }
+ }
InitializedSomething = true;
@@ -1958,8 +2038,8 @@ void InitListChecker::CheckStructUnionTypes(
if (VerifyOnly)
InvalidUse = !SemaRef.CanUseDecl(*Field, TreatUnavailableAsInvalid);
else
- InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field,
- IList->getInit(Index)->getLocStart());
+ InvalidUse = SemaRef.DiagnoseUseOfDecl(
+ *Field, IList->getInit(Index)->getBeginLoc());
if (InvalidUse) {
++Index;
++Field;
@@ -1967,6 +2047,14 @@ void InitListChecker::CheckStructUnionTypes(
continue;
}
+ if (!VerifyOnly) {
+ QualType ET = SemaRef.Context.getBaseElementType(Field->getType());
+ if (hasAccessibleDestructor(ET, InitLoc, SemaRef)) {
+ hadError = true;
+ return;
+ }
+ }
+
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
@@ -2005,7 +2093,22 @@ void InitListChecker::CheckStructUnionTypes(
if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer())
CheckEmptyInitializable(
InitializedEntity::InitializeMember(*Field, &Entity),
- IList->getLocEnd());
+ IList->getEndLoc());
+ }
+ }
+
+ // Check that the types of the remaining fields have accessible destructors.
+ if (!VerifyOnly) {
+ // If the initializer expression has a designated initializer, check the
+ // elements for which a designated initializer is not provided too.
+ RecordDecl::field_iterator I = HasDesignatedInit ? RD->field_begin()
+ : Field;
+ for (RecordDecl::field_iterator E = RD->field_end(); I != E; ++I) {
+ QualType ET = SemaRef.Context.getBaseElementType(I->getType());
+ if (hasAccessibleDestructor(ET, IList->getEndLoc(), SemaRef)) {
+ hadError = true;
+ return;
+ }
}
}
@@ -2182,11 +2285,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
ExistingInit = StructuredList->getArrayFiller();
if (!ExistingInit)
- StructuredList =
- getStructuredSubobjectInit(IList, Index, CurrentObjectType,
- StructuredList, StructuredIndex,
- SourceRange(D->getLocStart(),
- DIE->getLocEnd()));
+ StructuredList = getStructuredSubobjectInit(
+ IList, Index, CurrentObjectType, StructuredList, StructuredIndex,
+ SourceRange(D->getBeginLoc(), DIE->getEndLoc()));
else if (InitListExpr *Result = dyn_cast<InitListExpr>(ExistingInit))
StructuredList = Result;
else {
@@ -2194,10 +2295,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
dyn_cast<DesignatedInitUpdateExpr>(ExistingInit))
StructuredList = E->getUpdater();
else {
- DesignatedInitUpdateExpr *DIUE =
- new (SemaRef.Context) DesignatedInitUpdateExpr(SemaRef.Context,
- D->getLocStart(), ExistingInit,
- DIE->getLocEnd());
+ DesignatedInitUpdateExpr *DIUE = new (SemaRef.Context)
+ DesignatedInitUpdateExpr(SemaRef.Context, D->getBeginLoc(),
+ ExistingInit, DIE->getEndLoc());
StructuredList->updateInit(SemaRef.Context, StructuredIndex, DIUE);
StructuredList = DIUE->getUpdater();
}
@@ -2222,14 +2322,13 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Here, xs[0].a == 0 and xs[0].b == 3, since the second,
// designated initializer re-initializes the whole
// subobject [0], overwriting previous initializers.
- SemaRef.Diag(D->getLocStart(),
+ SemaRef.Diag(D->getBeginLoc(),
diag::warn_subobject_initializer_overrides)
- << SourceRange(D->getLocStart(), DIE->getLocEnd());
+ << SourceRange(D->getBeginLoc(), DIE->getEndLoc());
- SemaRef.Diag(ExistingInit->getLocStart(),
+ SemaRef.Diag(ExistingInit->getBeginLoc(),
diag::note_previous_initializer)
- << /*FIXME:has side effects=*/0
- << ExistingInit->getSourceRange();
+ << /*FIXME:has side effects=*/0 << ExistingInit->getSourceRange();
}
}
}
@@ -2350,10 +2449,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
SemaRef.Diag(D->getFieldLoc(),
diag::warn_initializer_overrides)
<< D->getSourceRange();
- SemaRef.Diag(ExistingInit->getLocStart(),
+ SemaRef.Diag(ExistingInit->getBeginLoc(),
diag::note_previous_initializer)
- << /*FIXME:has side effects=*/0
- << ExistingInit->getSourceRange();
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
}
// remove existing initializer
@@ -2395,10 +2494,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (!VerifyOnly) {
DesignatedInitExpr::Designator *NextD
= DIE->getDesignator(DesigIdx + 1);
- SemaRef.Diag(NextD->getLocStart(),
- diag::err_designator_into_flexible_array_member)
- << SourceRange(NextD->getLocStart(),
- DIE->getLocEnd());
+ SemaRef.Diag(NextD->getBeginLoc(),
+ diag::err_designator_into_flexible_array_member)
+ << SourceRange(NextD->getBeginLoc(), DIE->getEndLoc());
SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
<< *Field;
}
@@ -2409,9 +2507,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
!isa<StringLiteral>(DIE->getInit())) {
// The initializer is not an initializer list.
if (!VerifyOnly) {
- SemaRef.Diag(DIE->getInit()->getLocStart(),
- diag::err_flexible_array_init_needs_braces)
- << DIE->getInit()->getSourceRange();
+ SemaRef.Diag(DIE->getInit()->getBeginLoc(),
+ diag::err_flexible_array_init_needs_braces)
+ << DIE->getInit()->getSourceRange();
SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
<< *Field;
}
@@ -2553,10 +2651,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
if (DesignatedEndIndex >= MaxElements) {
if (!VerifyOnly)
- SemaRef.Diag(IndexExpr->getLocStart(),
- diag::err_array_designator_too_large)
- << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
- << IndexExpr->getSourceRange();
+ SemaRef.Diag(IndexExpr->getBeginLoc(),
+ diag::err_array_designator_too_large)
+ << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
+ << IndexExpr->getSourceRange();
++Index;
return true;
}
@@ -2728,10 +2826,8 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
SemaRef.Diag(InitRange.getBegin(),
diag::warn_subobject_initializer_overrides)
<< InitRange;
- SemaRef.Diag(ExistingInit->getLocStart(),
- diag::note_previous_initializer)
- << /*FIXME:has side effects=*/0
- << ExistingInit->getSourceRange();
+ SemaRef.Diag(ExistingInit->getBeginLoc(), diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0 << ExistingInit->getSourceRange();
}
InitListExpr *Result
@@ -2810,14 +2906,11 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
// There is an overwrite taking place because the first braced initializer
// list "{ .a = 2 }' already provides value for .p.b (which is zero).
if (PrevInit->getSourceRange().isValid()) {
- SemaRef.Diag(expr->getLocStart(),
- diag::warn_initializer_overrides)
- << expr->getSourceRange();
+ SemaRef.Diag(expr->getBeginLoc(), diag::warn_initializer_overrides)
+ << expr->getSourceRange();
- SemaRef.Diag(PrevInit->getLocStart(),
- diag::note_previous_initializer)
- << /*FIXME:has side effects=*/0
- << PrevInit->getSourceRange();
+ SemaRef.Diag(PrevInit->getBeginLoc(), diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0 << PrevInit->getSourceRange();
}
}
@@ -2833,7 +2926,7 @@ void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
/// value of the constant expression.
static ExprResult
CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
- SourceLocation Loc = Index->getLocStart();
+ SourceLocation Loc = Index->getBeginLoc();
// Make sure this is an integer constant expression.
ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
@@ -2941,8 +3034,8 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
Init.getAs<Expr>());
if (!getLangOpts().C99)
- Diag(DIE->getLocStart(), diag::ext_designated_init)
- << DIE->getSourceRange();
+ Diag(DIE->getBeginLoc(), diag::ext_designated_init)
+ << DIE->getSourceRange();
return DIE;
}
@@ -3172,8 +3265,7 @@ void InitializationSequence::Step::Destroy() {
case SK_StdInitializerList:
case SK_StdInitializerListConstructorCall:
case SK_OCLSamplerInit:
- case SK_OCLZeroEvent:
- case SK_OCLZeroQueue:
+ case SK_OCLZeroOpaqueType:
break;
case SK_ConversionSequence:
@@ -3459,16 +3551,9 @@ void InitializationSequence::AddOCLSamplerInitStep(QualType T) {
Steps.push_back(S);
}
-void InitializationSequence::AddOCLZeroEventStep(QualType T) {
+void InitializationSequence::AddOCLZeroOpaqueTypeStep(QualType T) {
Step S;
- S.Kind = SK_OCLZeroEvent;
- S.Type = T;
- Steps.push_back(S);
-}
-
-void InitializationSequence::AddOCLZeroQueueStep(QualType T) {
- Step S;
- S.Kind = SK_OCLZeroQueue;
+ S.Kind = SK_OCLZeroOpaqueType;
S.Type = T;
Steps.push_back(S);
}
@@ -3507,11 +3592,11 @@ maybeRecoverWithZeroInitialization(Sema &S, InitializationSequence &Sequence,
return false;
VarDecl *VD = cast<VarDecl>(Entity.getDecl());
- if (VD->getInit() || VD->getLocEnd().isMacroID())
+ if (VD->getInit() || VD->getEndLoc().isMacroID())
return false;
QualType VariableTy = VD->getType().getCanonicalType();
- SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
+ SourceLocation Loc = S.getLocForEndOfToken(VD->getEndLoc());
std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
if (!Init.empty()) {
Sequence.AddZeroInitializationStep(Entity.getType());
@@ -3583,7 +3668,7 @@ static bool TryInitializerListConstruction(Sema &S,
InitializedEntity HiddenArray =
InitializedEntity::InitializeTemporary(ArrayType);
InitializationKind Kind = InitializationKind::CreateDirectList(
- List->getExprLoc(), List->getLocStart(), List->getLocEnd());
+ List->getExprLoc(), List->getBeginLoc(), List->getEndLoc());
TryListInitialization(S, HiddenArray, Kind, List, Sequence,
TreatUnavailableAsInvalid);
if (Sequence)
@@ -3974,7 +4059,7 @@ static void TryReferenceListInitialization(Sema &S,
T1, Sequence))
return;
- SourceLocation DeclLoc = Initializer->getLocStart();
+ SourceLocation DeclLoc = Initializer->getBeginLoc();
bool dummy1, dummy2, dummy3;
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, dummy1,
@@ -4031,7 +4116,7 @@ static void TryListInitialization(Sema &S,
}
if (DestType->isRecordType() &&
- !S.isCompleteType(InitList->getLocStart(), DestType)) {
+ !S.isCompleteType(InitList->getBeginLoc(), DestType)) {
Sequence.setIncompleteTypeFailure(DestType);
return;
}
@@ -4051,7 +4136,7 @@ static void TryListInitialization(Sema &S,
if (DestType->isRecordType()) {
QualType InitType = InitList->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
- S.IsDerivedFrom(InitList->getLocStart(), InitType, DestType)) {
+ S.IsDerivedFrom(InitList->getBeginLoc(), InitType, DestType)) {
Expr *InitListAsExpr = InitList;
TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType,
DestType, Sequence,
@@ -4218,9 +4303,8 @@ static OverloadingResult TryRefInitWithConversionFunction(
bool DerivedToBase;
bool ObjCConversion;
bool ObjCLifetimeConversion;
- assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
- T1, T2, DerivedToBase,
- ObjCConversion,
+ assert(!S.CompareReferenceRelationship(Initializer->getBeginLoc(), T1, T2,
+ DerivedToBase, ObjCConversion,
ObjCLifetimeConversion) &&
"Must have incompatible references when binding via conversion");
(void)DerivedToBase;
@@ -4313,7 +4397,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
return OR_No_Viable_Function;
- SourceLocation DeclLoc = Initializer->getLocStart();
+ SourceLocation DeclLoc = Initializer->getBeginLoc();
// Perform overload resolution. If it fails, return the failed result.
OverloadCandidateSet::iterator Best;
@@ -4439,7 +4523,7 @@ static void TryReferenceInitializationCore(Sema &S,
Qualifiers T2Quals,
InitializationSequence &Sequence) {
QualType DestType = Entity.getType();
- SourceLocation DeclLoc = Initializer->getLocStart();
+ SourceLocation DeclLoc = Initializer->getBeginLoc();
// Compute some basic properties of the types and the initializer.
bool isLValueRef = DestType->isLValueReferenceType();
bool isRValueRef = !isLValueRef;
@@ -4585,11 +4669,22 @@ static void TryReferenceInitializationCore(Sema &S,
// If the converted initializer is a prvalue, its type T4 is adjusted
// to type "cv1 T4" and the temporary materialization conversion is
// applied.
+ // Postpone address space conversions to after the temporary materialization
+ // conversion to allow creating temporaries in the alloca address space.
+ auto AS1 = T1Quals.getAddressSpace();
+ auto AS2 = T2Quals.getAddressSpace();
+ T1Quals.removeAddressSpace();
+ T2Quals.removeAddressSpace();
QualType cv1T4 = S.Context.getQualifiedType(cv2T2, T1Quals);
if (T1Quals != T2Quals)
Sequence.AddQualificationConversionStep(cv1T4, ValueKind);
Sequence.AddReferenceBindingStep(cv1T4, ValueKind == VK_RValue);
ValueKind = isLValueRef ? VK_LValue : VK_XValue;
+ if (AS1 != AS2) {
+ T1Quals.addAddressSpace(AS1);
+ QualType cv1AST4 = S.Context.getQualifiedType(cv2T2, T1Quals);
+ Sequence.AddQualificationConversionStep(cv1AST4, ValueKind);
+ }
// In any case, the reference is bound to the resulting glvalue (or to
// an appropriate base class subobject).
@@ -4867,7 +4962,7 @@ static void TryUserDefinedConversion(Sema &S,
}
}
- SourceLocation DeclLoc = Initializer->getLocStart();
+ SourceLocation DeclLoc = Initializer->getBeginLoc();
if (const RecordType *SourceRecordType = SourceType->getAs<RecordType>()) {
// The type we're converting from is a class type, enumerate its conversion
@@ -5172,39 +5267,51 @@ static bool TryOCLSamplerInitialization(Sema &S,
return true;
}
-//
-// OpenCL 1.2 spec, s6.12.10
-//
-// The event argument can also be used to associate the
-// async_work_group_copy with a previous async copy allowing
-// an event to be shared by multiple async copies; otherwise
-// event should be zero.
-//
-static bool TryOCLZeroEventInitialization(Sema &S,
- InitializationSequence &Sequence,
- QualType DestType,
- Expr *Initializer) {
- if (!S.getLangOpts().OpenCL || !DestType->isEventT() ||
- !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
- (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
- return false;
-
- Sequence.AddOCLZeroEventStep(DestType);
- return true;
+static bool IsZeroInitializer(Expr *Initializer, Sema &S) {
+ return Initializer->isIntegerConstantExpr(S.getASTContext()) &&
+ (Initializer->EvaluateKnownConstInt(S.getASTContext()) == 0);
}
-static bool TryOCLZeroQueueInitialization(Sema &S,
- InitializationSequence &Sequence,
- QualType DestType,
- Expr *Initializer) {
- if (!S.getLangOpts().OpenCL || S.getLangOpts().OpenCLVersion < 200 ||
- !DestType->isQueueT() ||
- !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
- (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
+static bool TryOCLZeroOpaqueTypeInitialization(Sema &S,
+ InitializationSequence &Sequence,
+ QualType DestType,
+ Expr *Initializer) {
+ if (!S.getLangOpts().OpenCL)
return false;
- Sequence.AddOCLZeroQueueStep(DestType);
- return true;
+ //
+ // OpenCL 1.2 spec, s6.12.10
+ //
+ // The event argument can also be used to associate the
+ // async_work_group_copy with a previous async copy allowing
+ // an event to be shared by multiple async copies; otherwise
+ // event should be zero.
+ //
+ if (DestType->isEventT() || DestType->isQueueT()) {
+ if (!IsZeroInitializer(Initializer, S))
+ return false;
+
+ Sequence.AddOCLZeroOpaqueTypeStep(DestType);
+ return true;
+ }
+
+ // We should allow zero initialization for all types defined in the
+ // cl_intel_device_side_avc_motion_estimation extension, except
+ // intel_sub_group_avc_mce_payload_t and intel_sub_group_avc_mce_result_t.
+ if (S.getOpenCLOptions().isEnabled(
+ "cl_intel_device_side_avc_motion_estimation") &&
+ DestType->isOCLIntelSubgroupAVCType()) {
+ if (DestType->isOCLIntelSubgroupAVCMcePayloadType() ||
+ DestType->isOCLIntelSubgroupAVCMceResultType())
+ return false;
+ if (!IsZeroInitializer(Initializer, S))
+ return false;
+
+ Sequence.AddOCLZeroOpaqueTypeStep(DestType);
+ return true;
+ }
+
+ return false;
}
InitializationSequence::InitializationSequence(Sema &S,
@@ -5309,8 +5416,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
Expr *Initializer = nullptr;
if (Args.size() == 1) {
Initializer = Args[0];
- if (S.getLangOpts().ObjC1) {
- if (S.CheckObjCBridgeRelatedConversions(Initializer->getLocStart(),
+ if (S.getLangOpts().ObjC) {
+ if (S.CheckObjCBridgeRelatedConversions(Initializer->getBeginLoc(),
DestType, Initializer->getType(),
Initializer) ||
S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
@@ -5431,7 +5538,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
// array from a compound literal that creates an array of the same
// type, so long as the initializer has no side effects.
if (!S.getLangOpts().CPlusPlus && Initializer &&
- isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
+ (isa<ConstantExpr>(Initializer->IgnoreParens()) ||
+ isa<CompoundLiteralExpr>(Initializer->IgnoreParens())) &&
Initializer->getType()->isArrayType()) {
const ArrayType *SourceAT
= Context.getAsArrayType(Initializer->getType());
@@ -5478,12 +5586,9 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
return;
- if (TryOCLZeroEventInitialization(S, *this, DestType, Initializer))
+ if (TryOCLZeroOpaqueTypeInitialization(S, *this, DestType, Initializer))
return;
- if (TryOCLZeroQueueInitialization(S, *this, DestType, Initializer))
- return;
-
// Handle initialization in C
AddCAssignmentStep(DestType);
MaybeProduceObjCObject(S, *this, Entity);
@@ -5501,7 +5606,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (Kind.getKind() == InitializationKind::IK_Direct ||
(Kind.getKind() == InitializationKind::IK_Copy &&
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
- S.IsDerivedFrom(Initializer->getLocStart(), SourceType, DestType))))
+ S.IsDerivedFrom(Initializer->getBeginLoc(), SourceType, DestType))))
TryConstructorInitialization(S, Entity, Kind, Args,
DestType, DestType, *this);
// - Otherwise (i.e., for the remaining copy-initialization cases),
@@ -5535,7 +5640,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
bool NeedAtomicConversion = false;
if (const AtomicType *Atomic = DestType->getAs<AtomicType>()) {
if (Context.hasSameUnqualifiedType(SourceType, Atomic->getValueType()) ||
- S.IsDerivedFrom(Initializer->getLocStart(), SourceType,
+ S.IsDerivedFrom(Initializer->getBeginLoc(), SourceType,
Atomic->getValueType())) {
DestType = Atomic->getValueType();
NeedAtomicConversion = true;
@@ -5758,7 +5863,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
case InitializedEntity::EK_LambdaToBlockConversionBlockElement:
case InitializedEntity::EK_CompoundLiteralInit:
case InitializedEntity::EK_RelatedResult:
- return Initializer->getLocStart();
+ return Initializer->getBeginLoc();
}
llvm_unreachable("missed an InitializedEntity kind?");
}
@@ -6092,7 +6197,10 @@ PerformConstructorInitialization(Sema &S,
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
if (!TSInfo)
TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
- SourceRange ParenOrBraceRange = Kind.getParenOrBraceRange();
+ SourceRange ParenOrBraceRange =
+ (Kind.getKind() == InitializationKind::IK_DirectList)
+ ? SourceRange(LBraceLoc, RBraceLoc)
+ : Kind.getParenOrBraceRange();
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
@@ -6102,7 +6210,7 @@ PerformConstructorInitialization(Sema &S,
}
S.MarkFunctionReferenced(Loc, Constructor);
- CurInit = new (S.Context) CXXTemporaryObjectExpr(
+ CurInit = CXXTemporaryObjectExpr::Create(
S.Context, Constructor,
Entity.getType().getNonLValueExprType(S.Context), TSInfo,
ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
@@ -6353,7 +6461,7 @@ static bool isVarOnPath(IndirectLocalPath &Path, VarDecl *VD) {
}
static bool pathContainsInit(IndirectLocalPath &Path) {
- return std::any_of(Path.begin(), Path.end(), [=](IndirectLocalPathEntry E) {
+ return llvm::any_of(Path, [=](IndirectLocalPathEntry E) {
return E.Kind == IndirectLocalPathEntry::DefaultInit ||
E.Kind == IndirectLocalPathEntry::VarInit;
});
@@ -6371,10 +6479,14 @@ static bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) {
const TypeSourceInfo *TSI = FD->getTypeSourceInfo();
if (!TSI)
return false;
+ // Don't declare this variable in the second operand of the for-statement;
+ // GCC miscompiles that by ending its lifetime before evaluating the
+ // third operand. See gcc.gnu.org/PR86769.
+ AttributedTypeLoc ATL;
for (TypeLoc TL = TSI->getTypeLoc();
- auto ATL = TL.getAsAdjusted<AttributedTypeLoc>();
+ (ATL = TL.getAsAdjusted<AttributedTypeLoc>());
TL = ATL.getModifiedLoc()) {
- if (ATL.getAttrKind() == AttributedType::attr_lifetimebound)
+ if (ATL.getAttrAs<LifetimeBoundAttr>())
return true;
}
return false;
@@ -6437,8 +6549,8 @@ static void visitLocalsRetainedByReferenceBinding(IndirectLocalPath &Path,
do {
Old = Init;
- if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
- Init = EWC->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
// If this is just redundant braces around an initializer, step over it.
@@ -6561,8 +6673,8 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
Init = DIE->getExpr();
}
- if (auto *EWC = dyn_cast<ExprWithCleanups>(Init))
- Init = EWC->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
// Dig out the expression which constructs the extended temporary.
Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
@@ -6694,6 +6806,20 @@ static void visitLocalsRetainedByInitializer(IndirectLocalPath &Path,
return;
}
+ // The lifetime of an init-capture is that of the closure object constructed
+ // by a lambda-expression.
+ if (auto *LE = dyn_cast<LambdaExpr>(Init)) {
+ for (Expr *E : LE->capture_inits()) {
+ if (!E)
+ continue;
+ if (E->isGLValue())
+ visitLocalsRetainedByReferenceBinding(Path, E, RK_ReferenceBinding,
+ Visit);
+ else
+ visitLocalsRetainedByInitializer(Path, E, Visit, true);
+ }
+ }
+
if (isa<CallExpr>(Init) || isa<CXXConstructExpr>(Init))
return visitLifetimeBoundArguments(Path, Init, Visit);
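Editor's note: the init-capture case the new LambdaExpr branch walks into looks roughly like this (a hedged C++ sketch; whether a dangling-reference diagnostic actually fires depends on the enabled warnings):

    auto make_counter() {
      int local = 0;
      // The lifetime of the init-capture 'r' is that of the closure object,
      // but 'r' is bound to 'local', so the returned closure refers to a
      // variable that is already dead.
      return [&r = local]() { return r; };
    }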
@@ -6938,6 +7064,10 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
} else if (isa<BlockExpr>(L)) {
Diag(DiagLoc, diag::err_ret_local_block) << DiagRange;
} else if (isa<AddrLabelExpr>(L)) {
+ // Don't warn when returning a label from a statement expression.
+ // Leaving the scope doesn't end its lifetime.
+ if (LK == LK_StmtExprResult)
+ return false;
Diag(DiagLoc, diag::warn_ret_addr_label) << DiagRange;
} else {
Diag(DiagLoc, diag::warn_ret_local_temp_addr_ref)
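Editor's note: the label-address case exempted by the new early return is a GNU extension; a hedged sketch (requires GCC/Clang statement expressions and the &&label operator):

    int f() {
      done: ;
      // The statement expression's result is the address of a label. Leaving
      // the statement expression's scope does not end the label's lifetime,
      // so no "returning address of label" style warning is wanted here.
      void *p = ({ &&done; });
      return p != 0;
    }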
@@ -7063,18 +7193,18 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
return;
}
- S.Diag(CE->getLocStart(), DiagID);
+ S.Diag(CE->getBeginLoc(), DiagID);
// Get all the locations for a fix-it. Don't emit the fix-it if any location
// is within a macro.
- SourceLocation CallBegin = CE->getCallee()->getLocStart();
+ SourceLocation CallBegin = CE->getCallee()->getBeginLoc();
if (CallBegin.isMacroID())
return;
SourceLocation RParen = CE->getRParenLoc();
if (RParen.isMacroID())
return;
SourceLocation LParen;
- SourceLocation ArgLoc = Arg->getLocStart();
+ SourceLocation ArgLoc = Arg->getBeginLoc();
// Special testing for the argument location. Since the fix-it needs the
// location right before the argument, the argument location can be in a
@@ -7089,7 +7219,7 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
LParen = ArgLoc.getLocWithOffset(-1);
- S.Diag(CE->getLocStart(), diag::note_remove_move)
+ S.Diag(CE->getBeginLoc(), diag::note_remove_move)
<< FixItHint::CreateRemoval(SourceRange(CallBegin, LParen))
<< FixItHint::CreateRemoval(SourceRange(RParen, RParen));
}
@@ -7142,12 +7272,20 @@ ExprResult Sema::TemporaryMaterializationConversion(Expr *E) {
return CreateMaterializeTemporaryExpr(E->getType(), E, false);
}
-ExprResult
-InitializationSequence::Perform(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- MultiExprArg Args,
- QualType *ResultType) {
+ExprResult Sema::PerformQualificationConversion(Expr *E, QualType Ty,
+ ExprValueKind VK,
+ CheckedConversionKind CCK) {
+ CastKind CK = (Ty.getAddressSpace() != E->getType().getAddressSpace())
+ ? CK_AddressSpaceConversion
+ : CK_NoOp;
+ return ImpCastExprToType(E, Ty, CK, VK, /*BasePath=*/nullptr, CCK);
+}
+
+ExprResult InitializationSequence::Perform(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ QualType *ResultType) {
if (Failed()) {
Diagnose(S, Entity, Kind, Args);
return ExprError();
@@ -7231,8 +7369,8 @@ InitializationSequence::Perform(Sema &S,
// from an initializer list. For parameters, we produce a better warning
// elsewhere.
Expr *Init = Args[0];
- S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init)
- << Init->getSourceRange();
+ S.Diag(Init->getBeginLoc(), diag::warn_cxx98_compat_reference_list_init)
+ << Init->getSourceRange();
}
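Editor's note: the -Wc++98-compat case flagged here is list-initialization of a reference, for example (hedged sketch):

    int value = 1;
    // Binding a reference from a braced initializer list is a C++11 construct;
    // -Wc++98-compat points at the initializer's source range.
    const int &ref = {value};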
// OpenCL v2.0 s6.13.11.1. atomic variables can be initialized in global scope
@@ -7244,8 +7382,9 @@ InitializationSequence::Perform(Sema &S,
if (S.getLangOpts().OpenCLVersion >= 200 &&
ETy->isAtomicType() && !HasGlobalAS &&
Entity.getKind() == InitializedEntity::EK_Variable && Args.size() > 0) {
- S.Diag(Args[0]->getLocStart(), diag::err_opencl_atomic_init) << 1 <<
- SourceRange(Entity.getDecl()->getLocStart(), Args[0]->getLocEnd());
+ S.Diag(Args[0]->getBeginLoc(), diag::err_opencl_atomic_init)
+ << 1
+ << SourceRange(Entity.getDecl()->getBeginLoc(), Args[0]->getEndLoc());
return ExprError();
}
@@ -7296,8 +7435,7 @@ InitializationSequence::Perform(Sema &S,
case SK_ProduceObjCObject:
case SK_StdInitializerList:
case SK_OCLSamplerInit:
- case SK_OCLZeroEvent:
- case SK_OCLZeroQueue: {
+ case SK_OCLZeroOpaqueType: {
assert(Args.size() == 1);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
@@ -7361,10 +7499,9 @@ InitializationSequence::Perform(Sema &S,
// Casts to inaccessible base classes are allowed with C-style casts.
bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast();
- if (S.CheckDerivedToBaseConversion(SourceType, Step->Type,
- CurInit.get()->getLocStart(),
- CurInit.get()->getSourceRange(),
- &BasePath, IgnoreBaseAccess))
+ if (S.CheckDerivedToBaseConversion(
+ SourceType, Step->Type, CurInit.get()->getBeginLoc(),
+ CurInit.get()->getSourceRange(), &BasePath, IgnoreBaseAccess))
return ExprError();
ExprValueKind VK =
@@ -7393,7 +7530,7 @@ InitializationSequence::Perform(Sema &S,
if (auto *DRE = dyn_cast<DeclRefExpr>(CurInit.get()->IgnoreParens())) {
if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
- DRE->getLocStart()))
+ DRE->getBeginLoc()))
return ExprError();
}
}
@@ -7454,7 +7591,7 @@ InitializationSequence::Perform(Sema &S,
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Fn)) {
// Build a call to the selected constructor.
SmallVector<Expr*, 8> ConstructorArgs;
- SourceLocation Loc = CurInit.get()->getLocStart();
+ SourceLocation Loc = CurInit.get()->getBeginLoc();
// Determine the arguments required to actually perform the constructor
// call.
@@ -7521,10 +7658,10 @@ InitializationSequence::Perform(Sema &S,
if (const RecordType *Record = T->getAs<RecordType>()) {
CXXDestructorDecl *Destructor
= S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
- S.CheckDestructorAccess(CurInit.get()->getLocStart(), Destructor,
+ S.CheckDestructorAccess(CurInit.get()->getBeginLoc(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
- S.MarkFunctionReferenced(CurInit.get()->getLocStart(), Destructor);
- if (S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getLocStart()))
+ S.MarkFunctionReferenced(CurInit.get()->getBeginLoc(), Destructor);
+ if (S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getBeginLoc()))
return ExprError();
}
}
@@ -7536,12 +7673,11 @@ InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionRValue: {
// Perform a qualification conversion; these can never go wrong.
ExprValueKind VK =
- Step->Kind == SK_QualificationConversionLValue ?
- VK_LValue :
- (Step->Kind == SK_QualificationConversionXValue ?
- VK_XValue :
- VK_RValue);
- CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NoOp, VK);
+ Step->Kind == SK_QualificationConversionLValue
+ ? VK_LValue
+ : (Step->Kind == SK_QualificationConversionXValue ? VK_XValue
+ : VK_RValue);
+ CurInit = S.PerformQualificationConversion(CurInit.get(), Step->Type, VK);
break;
}
@@ -7562,6 +7698,18 @@ InitializationSequence::Perform(Sema &S,
case SK_ConversionSequence:
case SK_ConversionSequenceNoNarrowing: {
+ if (const auto *FromPtrType =
+ CurInit.get()->getType()->getAs<PointerType>()) {
+ if (const auto *ToPtrType = Step->Type->getAs<PointerType>()) {
+ if (FromPtrType->getPointeeType()->hasAttr(attr::NoDeref) &&
+ !ToPtrType->getPointeeType()->hasAttr(attr::NoDeref)) {
+ S.Diag(CurInit.get()->getExprLoc(),
+ diag::warn_noderef_to_dereferenceable_pointer)
+ << CurInit.get()->getSourceRange();
+ }
+ }
+ }
+
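Editor's note: the new warning covers initializations that silently drop Clang's 'noderef' type attribute from a pointee; a hedged sketch of the source pattern (attribute spelling as in Clang's noderef tests):

    int __attribute__((noderef)) *tagged;   // pointee may not be dereferenced directly
    // Initializing a plain pointer from it makes the pointee dereferenceable
    // again, which is what warn_noderef_to_dereferenceable_pointer reports.
    int *plain = tagged;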
Sema::CheckedConversionKind CCK
= Kind.isCStyleCast()? Sema::CCK_CStyleCast
: Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
@@ -7728,6 +7876,7 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment: {
QualType SourceType = CurInit.get()->getType();
+
// Save off the initial CurInit in case we need to emit a diagnostic
ExprResult InitialCurInit = CurInit;
ExprResult Result = CurInit;
@@ -7863,7 +8012,7 @@ InitializationSequence::Perform(Sema &S,
}
case SK_OCLSamplerInit: {
- // Sampler initialzation have 5 cases:
+ // Sampler initialization has 5 cases:
// 1. function argument passing
// 1a. argument is a file-scope variable
// 1b. argument is a function-scope variable
@@ -7925,8 +8074,9 @@ InitializationSequence::Perform(Sema &S,
break;
}
- llvm::APSInt Result;
- Init->EvaluateAsInt(Result, S.Context);
+ Expr::EvalResult EVResult;
+ Init->EvaluateAsInt(EVResult, S.Context);
+ llvm::APSInt Result = EVResult.Val.getInt();
const uint64_t SamplerValue = Result.getLimitedValue();
// 32-bit value of sampler's initializer is interpreted as
// bit-field with the following structure:
@@ -7936,7 +8086,9 @@ InitializationSequence::Perform(Sema &S,
// defined in SPIR spec v1.2 and also opencl-c.h
unsigned AddressingMode = (0x0E & SamplerValue) >> 1;
unsigned FilterMode = (0x30 & SamplerValue) >> 4;
- if (FilterMode != 1 && FilterMode != 2)
+ if (FilterMode != 1 && FilterMode != 2 &&
+ !S.getOpenCLOptions().isEnabled(
+ "cl_intel_device_side_avc_motion_estimation"))
S.Diag(Kind.getLocation(),
diag::warn_sampler_initializer_invalid_bits)
<< "Filter Mode";
@@ -7952,21 +8104,13 @@ InitializationSequence::Perform(Sema &S,
CK_IntToOCLSampler);
break;
}
- case SK_OCLZeroEvent: {
- assert(Step->Type->isEventT() &&
- "Event initialization on non-event type.");
-
- CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
- CK_ZeroToOCLEvent,
- CurInit.get()->getValueKind());
- break;
- }
- case SK_OCLZeroQueue: {
- assert(Step->Type->isQueueT() &&
- "Event initialization on non queue type.");
+ case SK_OCLZeroOpaqueType: {
+ assert((Step->Type->isEventT() || Step->Type->isQueueT() ||
+ Step->Type->isOCLIntelSubgroupAVCType()) &&
+ "Wrong type for initialization of OpenCL opaque type.");
CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
- CK_ZeroToOCLQueue,
+ CK_ZeroToOCLOpaqueType,
CurInit.get()->getValueKind());
break;
}
@@ -8019,7 +8163,7 @@ static bool DiagnoseUninitializedReference(Sema &S, SourceLocation Loc,
}
for (const auto &BI : RD->bases()) {
- if (DiagnoseUninitializedReference(S, BI.getLocStart(), BI.getType())) {
+ if (DiagnoseUninitializedReference(S, BI.getBeginLoc(), BI.getType())) {
S.Diag(Loc, diag::note_value_initialization_here) << RD;
return true;
}
@@ -8074,7 +8218,7 @@ static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
// inner initialization failed.
QualType T = DestType->getAs<ReferenceType>()->getPointeeType();
diagnoseListInit(S, InitializedEntity::InitializeTemporary(T), InitList);
- SourceLocation Loc = InitList->getLocStart();
+ SourceLocation Loc = InitList->getBeginLoc();
if (auto *D = Entity.getDecl())
Loc = D->getLocation();
S.Diag(Loc, diag::note_in_reference_temporary_list_initializer) << T;
@@ -8124,7 +8268,7 @@ bool InitializationSequence::Diagnose(Sema &S,
(void)Diagnosed;
} else // FIXME: diagnostic below could be better!
S.Diag(Kind.getLocation(), diag::err_reference_has_multiple_inits)
- << SourceRange(Args.front()->getLocStart(), Args.back()->getLocEnd());
+ << SourceRange(Args.front()->getBeginLoc(), Args.back()->getEndLoc());
break;
case FK_ParenthesizedListInitForReference:
S.Diag(Kind.getLocation(), diag::err_list_init_in_parens)
@@ -8153,13 +8297,14 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_PlainStringIntoUTF8Char:
S.Diag(Kind.getLocation(),
diag::err_array_init_plain_string_into_char8_t);
- S.Diag(Args.front()->getLocStart(),
+ S.Diag(Args.front()->getBeginLoc(),
diag::note_array_init_plain_string_into_char8_t)
- << FixItHint::CreateInsertion(Args.front()->getLocStart(), "u8");
+ << FixItHint::CreateInsertion(Args.front()->getBeginLoc(), "u8");
break;
case FK_UTF8StringIntoPlainChar:
S.Diag(Kind.getLocation(),
- diag::err_array_init_utf8_string_into_char);
+ diag::err_array_init_utf8_string_into_char)
+ << S.getLangOpts().CPlusPlus2a;
break;
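Editor's note: the extra C++2a flag reflects that u8 string literals change element type there; a hedged sketch of the initialization this diagnostic covers:

    // With -std=c++2a (or -fchar8_t) the u8 literal has type 'const char8_t[3]',
    // so it can no longer initialize an array of plain char.
    char greeting[] = u8"hi";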
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
@@ -8189,7 +8334,7 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_AddressOfUnaddressableFunction: {
auto *FD = cast<FunctionDecl>(cast<DeclRefExpr>(OnlyArg)->getDecl());
S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
- OnlyArg->getLocStart());
+ OnlyArg->getBeginLoc());
break;
}
@@ -8335,10 +8480,10 @@ bool InitializationSequence::Diagnose(Sema &S,
auto *InitList = dyn_cast<InitListExpr>(Args[0]);
if (InitList && InitList->getNumInits() >= 1) {
- R = SourceRange(InitList->getInit(0)->getLocEnd(), InitList->getLocEnd());
+ R = SourceRange(InitList->getInit(0)->getEndLoc(), InitList->getEndLoc());
} else {
assert(Args.size() > 1 && "Expected multiple initializers!");
- R = SourceRange(Args.front()->getLocEnd(), Args.back()->getLocEnd());
+ R = SourceRange(Args.front()->getEndLoc(), Args.back()->getEndLoc());
}
R.setBegin(S.getLocForEndOfToken(R.getBegin()));
@@ -8370,8 +8515,8 @@ bool InitializationSequence::Diagnose(Sema &S,
case FK_ConstructorOverloadFailed: {
SourceRange ArgsRange;
if (Args.size())
- ArgsRange = SourceRange(Args.front()->getLocStart(),
- Args.back()->getLocEnd());
+ ArgsRange =
+ SourceRange(Args.front()->getBeginLoc(), Args.back()->getEndLoc());
if (Failure == FK_ListConstructorOverloadFailed) {
assert(Args.size() == 1 &&
@@ -8849,12 +8994,8 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "OpenCL sampler_t from integer constant";
break;
- case SK_OCLZeroEvent:
- OS << "OpenCL event_t from zero";
- break;
-
- case SK_OCLZeroQueue:
- OS << "OpenCL queue_t from zero";
+ case SK_OCLZeroOpaqueType:
+ OS << "OpenCL opaque type from zero";
break;
}
@@ -8906,7 +9047,7 @@ static void DiagnoseNarrowingInInitList(Sema &S,
// This was a floating-to-integer conversion, which is always considered a
// narrowing conversion even if the value is a constant and can be
// represented exactly as an integer.
- S.Diag(PostInit->getLocStart(), NarrowingErrs(S.getLangOpts())
+ S.Diag(PostInit->getBeginLoc(), NarrowingErrs(S.getLangOpts())
? diag::ext_init_list_type_narrowing
: diag::warn_init_list_type_narrowing)
<< PostInit->getSourceRange()
@@ -8916,7 +9057,7 @@ static void DiagnoseNarrowingInInitList(Sema &S,
case NK_Constant_Narrowing:
// A constant value was narrowed.
- S.Diag(PostInit->getLocStart(),
+ S.Diag(PostInit->getBeginLoc(),
NarrowingErrs(S.getLangOpts())
? diag::ext_init_list_constant_narrowing
: diag::warn_init_list_constant_narrowing)
@@ -8927,7 +9068,7 @@ static void DiagnoseNarrowingInInitList(Sema &S,
case NK_Variable_Narrowing:
// A variable's value may have been narrowed.
- S.Diag(PostInit->getLocStart(),
+ S.Diag(PostInit->getBeginLoc(),
NarrowingErrs(S.getLangOpts())
? diag::ext_init_list_variable_narrowing
: diag::warn_init_list_variable_narrowing)
@@ -8955,11 +9096,11 @@ static void DiagnoseNarrowingInInitList(Sema &S,
return;
}
OS << ">(";
- S.Diag(PostInit->getLocStart(), diag::note_init_list_narrowing_silence)
+ S.Diag(PostInit->getBeginLoc(), diag::note_init_list_narrowing_silence)
<< PostInit->getSourceRange()
- << FixItHint::CreateInsertion(PostInit->getLocStart(), OS.str())
+ << FixItHint::CreateInsertion(PostInit->getBeginLoc(), OS.str())
<< FixItHint::CreateInsertion(
- S.getLocForEndOfToken(PostInit->getLocEnd()), ")");
+ S.getLocForEndOfToken(PostInit->getEndLoc()), ")");
}
//===----------------------------------------------------------------------===//
@@ -8974,8 +9115,8 @@ Sema::CanPerformCopyInitialization(const InitializedEntity &Entity,
Expr *InitE = Init.get();
assert(InitE && "No initialization expression");
- InitializationKind Kind
- = InitializationKind::CreateCopy(InitE->getLocStart(), SourceLocation());
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(InitE->getBeginLoc(), SourceLocation());
InitializationSequence Seq(*this, Entity, Kind, InitE);
return !Seq.Failed();
}
@@ -8993,11 +9134,10 @@ Sema::PerformCopyInitialization(const InitializedEntity &Entity,
assert(InitE && "No initialization expression?");
if (EqualLoc.isInvalid())
- EqualLoc = InitE->getLocStart();
+ EqualLoc = InitE->getBeginLoc();
- InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(),
- EqualLoc,
- AllowExplicit);
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ InitE->getBeginLoc(), EqualLoc, AllowExplicit);
InitializationSequence Seq(*this, Entity, Kind, InitE, TopLevelOfInitList);
// Prevent infinite recursion when performing parameter copy-initialization.
@@ -9060,8 +9200,11 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
TSInfo->getType()->getContainedDeducedType());
assert(DeducedTST && "not a deduced template specialization type");
- // We can only perform deduction for class templates.
auto TemplateName = DeducedTST->getTemplateName();
+ if (TemplateName.isDependent())
+ return Context.DependentTy;
+
+ // We can only perform deduction for class templates.
auto *Template =
dyn_cast_or_null<ClassTemplateDecl>(TemplateName.getAsTemplateDecl());
if (!Template) {
@@ -9074,8 +9217,12 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
}
// Can't deduce from dependent arguments.
- if (Expr::hasAnyTypeDependentArguments(Inits))
+ if (Expr::hasAnyTypeDependentArguments(Inits)) {
+ Diag(TSInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_cxx14_compat_class_template_argument_deduction)
+ << TSInfo->getTypeLoc().getSourceRange() << 0;
return Context.DependentTy;
+ }
// FIXME: Perform "exact type" matching first, per CWG discussion?
// Or implement this via an implied 'T(T) -> T' deduction guide?
@@ -9278,5 +9425,10 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// C++ [dcl.type.class.deduct]p1:
// The placeholder is replaced by the return type of the function selected
// by overload resolution for class template deduction.
- return SubstAutoType(TSInfo->getType(), Best->Function->getReturnType());
+ QualType DeducedType =
+ SubstAutoType(TSInfo->getType(), Best->Function->getReturnType());
+ Diag(TSInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_cxx14_compat_class_template_argument_deduction)
+ << TSInfo->getTypeLoc().getSourceRange() << 1 << DeducedType;
+ return DeducedType;
}
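Editor's note: both new warn_cxx14_compat_class_template_argument_deduction call sites cover source like the following (hedged sketch; relevant only under -Wc++14-compat in C++17 mode):

    template <typename T>
    struct Box {
      Box(T v) : value(v) {}
      T value;
    };

    Box b(42);   // class template argument deduction: incompatible with C++14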
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index 440567e032e4..af233b96d69b 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -479,7 +479,7 @@ void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
if (!LSI->ReturnType->isDependentType() &&
!LSI->ReturnType->isVoidType()) {
- if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType,
+ if (RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType,
diag::err_lambda_incomplete_result)) {
// Do nothing.
}
@@ -493,7 +493,9 @@ void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
LSI->finishedExplicitCaptures();
}
-void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
+void Sema::addLambdaParameters(
+ ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
+ CXXMethodDecl *CallOperator, Scope *CurScope) {
// Introduce our parameters into the function scope
for (unsigned p = 0, NumParams = CallOperator->getNumParams();
p < NumParams; ++p) {
@@ -501,7 +503,19 @@ void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
// If this has an identifier, add it to the scope stack.
if (CurScope && Param->getIdentifier()) {
- CheckShadow(CurScope, Param);
+ bool Error = false;
+ // Resolution of CWG 2211 in C++17 renders shadowing ill-formed, but we
+ // retroactively apply it.
+ for (const auto &Capture : Captures) {
+ if (Capture.Id == Param->getIdentifier()) {
+ Error = true;
+ Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
+ Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
+ << Capture.Id << true;
+ }
+ }
+ if (!Error)
+ CheckShadow(CurScope, Param);
PushOnScopeChains(Param, CurScope);
}
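Editor's note: the shadowing rule from CWG 2211 that this loop enforces rejects code such as (hedged C++ sketch):

    int n = 0;
    // error: parameter 'n' shadows the explicit capture 'n' (CWG 2211, applied
    // here retroactively to earlier language modes).
    auto f = [n](int n) { return n; };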
@@ -720,10 +734,9 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
// FIXME: This is a poor diagnostic for ReturnStmts without expressions.
// TODO: It's possible that the *first* return is the divergent one.
- Diag(RS->getLocStart(),
+ Diag(RS->getBeginLoc(),
diag::err_typecheck_missing_return_type_incompatible)
- << ReturnType << CSI.ReturnType
- << isa<LambdaScopeInfo>(CSI);
+ << ReturnType << CSI.ReturnType << isa<LambdaScopeInfo>(CSI);
// Continue iterating so that we keep emitting diagnostics.
}
}
@@ -746,14 +759,15 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType);
// Deduce the type of the init capture.
+ Expr *DeduceInit = Init;
QualType DeducedType = deduceVarTypeFromInitializer(
/*VarDecl*/nullptr, DeclarationName(Id), DeductType, TSI,
- SourceRange(Loc, Loc), IsDirectInit, Init);
+ SourceRange(Loc, Loc), IsDirectInit, DeduceInit);
if (DeducedType.isNull())
return QualType();
// Are we a non-list direct initialization?
- ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+ bool CXXDirectInit = isa<ParenListExpr>(Init);
// Perform initialization analysis and ensure any implicit conversions
// (such as lvalue-to-rvalue) are enforced.
@@ -762,30 +776,17 @@ QualType Sema::buildLambdaInitCaptureInitialization(SourceLocation Loc,
InitializationKind Kind =
IsDirectInit
? (CXXDirectInit ? InitializationKind::CreateDirect(
- Loc, Init->getLocStart(), Init->getLocEnd())
+ Loc, Init->getBeginLoc(), Init->getEndLoc())
: InitializationKind::CreateDirectList(Loc))
- : InitializationKind::CreateCopy(Loc, Init->getLocStart());
+ : InitializationKind::CreateCopy(Loc, Init->getBeginLoc());
- MultiExprArg Args = Init;
- if (CXXDirectInit)
- Args =
- MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs());
+ MultiExprArg Args = DeduceInit;
QualType DclT;
InitializationSequence InitSeq(*this, Entity, Kind, Args);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid())
return QualType();
- Init = Result.getAs<Expr>();
-
- // The init-capture initialization is a full-expression that must be
- // processed as one before we enter the declcontext of the lambda's
- // call-operator.
- Result = ActOnFinishFullExpr(Init, Loc, /*DiscardedValue*/ false,
- /*IsConstexpr*/ false,
- /*IsLambdaInitCaptureInitializer*/ true);
- if (Result.isInvalid())
- return QualType();
Init = Result.getAs<Expr>();
return DeducedType;
@@ -856,7 +857,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
EPI.HasTrailingReturn = true;
- EPI.TypeQuals |= DeclSpec::TQ_const;
+ EPI.TypeQuals.addConst();
// C++1y [expr.prim.lambda]:
// The lambda return type is 'auto', which is replaced by the
// trailing-return type if provided and/or deduced from 'return'
@@ -881,8 +882,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// This function call operator is declared const (9.3.1) if and only if
// the lambda-expression's parameter-declaration-clause is not followed
// by mutable. It is neither virtual nor declared volatile. [...]
- if (!FTI.hasMutableQualifier())
- FTI.TypeQuals |= DeclSpec::TQ_const;
+ if (!FTI.hasMutableQualifier()) {
+ FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const,
+ SourceLocation());
+ }
MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
assert(MethodTyInfo && "no type from lambda-declarator");
@@ -1153,7 +1156,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
// Add lambda parameters into scope.
- addLambdaParameters(Method, CurScope);
+ addLambdaParameters(Intro.Captures, Method, CurScope);
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
@@ -1195,7 +1198,7 @@ QualType Sema::getLambdaConversionFunctionResultType(
CallingConv CC = Context.getDefaultCallingConvention(
CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC);
- InvokerExtInfo.TypeQuals = 0;
+ InvokerExtInfo.TypeQuals = Qualifiers();
assert(InvokerExtInfo.RefQualifier == RQ_None &&
"Lambda's call operator should not have a reference qualifier");
return Context.getFunctionType(CallOpProto->getReturnType(),
@@ -1226,7 +1229,8 @@ static void addFunctionPointerConversion(Sema &S,
S.Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
// The conversion function is always const.
- ConvExtInfo.TypeQuals = Qualifiers::Const;
+ ConvExtInfo.TypeQuals = Qualifiers();
+ ConvExtInfo.TypeQuals.addConst();
QualType ConvTy =
S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo);
@@ -1286,29 +1290,23 @@ static void addFunctionPointerConversion(Sema &S,
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
ParmVarDecl *From = CallOperator->getParamDecl(I);
- InvokerParams.push_back(ParmVarDecl::Create(S.Context,
- // Temporarily add to the TU. This is set to the invoker below.
- S.Context.getTranslationUnitDecl(),
- From->getLocStart(),
- From->getLocation(),
- From->getIdentifier(),
- From->getType(),
- From->getTypeSourceInfo(),
- From->getStorageClass(),
- /*DefaultArg=*/nullptr));
+ InvokerParams.push_back(ParmVarDecl::Create(
+ S.Context,
+ // Temporarily add to the TU. This is set to the invoker below.
+ S.Context.getTranslationUnitDecl(), From->getBeginLoc(),
+ From->getLocation(), From->getIdentifier(), From->getType(),
+ From->getTypeSourceInfo(), From->getStorageClass(),
+ /*DefaultArg=*/nullptr));
CallOpConvTL.setParam(I, From);
CallOpConvNameTL.setParam(I, From);
}
- CXXConversionDecl *Conversion
- = CXXConversionDecl::Create(S.Context, Class, Loc,
- DeclarationNameInfo(ConversionName,
- Loc, ConvNameLoc),
- ConvTy,
- ConvTSI,
- /*isInline=*/true, /*isExplicit=*/false,
- /*isConstexpr=*/S.getLangOpts().CPlusPlus17,
- CallOperator->getBody()->getLocEnd());
+ CXXConversionDecl *Conversion = CXXConversionDecl::Create(
+ S.Context, Class, Loc,
+ DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI,
+ /*isInline=*/true, /*isExplicit=*/false,
+ /*isConstexpr=*/S.getLangOpts().CPlusPlus17,
+ CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
@@ -1343,14 +1341,11 @@ static void addFunctionPointerConversion(Sema &S,
// trailing return type of the invoker would require a visitor to rebuild
// the trailing return type and adjusting all back DeclRefExpr's to refer
// to the new static invoker parameters - not the call operator's.
- CXXMethodDecl *Invoke
- = CXXMethodDecl::Create(S.Context, Class, Loc,
- DeclarationNameInfo(InvokerName, Loc),
- InvokerFunctionTy,
- CallOperator->getTypeSourceInfo(),
- SC_Static, /*IsInline=*/true,
- /*IsConstexpr=*/false,
- CallOperator->getBody()->getLocEnd());
+ CXXMethodDecl *Invoke = CXXMethodDecl::Create(
+ S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
+ InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
+ /*IsInline=*/true,
+ /*IsConstexpr=*/false, CallOperator->getBody()->getEndLoc());
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
InvokerParams[I]->setOwningFunction(Invoke);
Invoke->setParams(InvokerParams);
@@ -1383,7 +1378,8 @@ static void addBlockPointerConversion(Sema &S,
FunctionProtoType::ExtProtoInfo ConversionEPI(
S.Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
- ConversionEPI.TypeQuals = Qualifiers::Const;
+ ConversionEPI.TypeQuals = Qualifiers();
+ ConversionEPI.TypeQuals.addConst();
QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI);
SourceLocation Loc = IntroducerRange.getBegin();
@@ -1392,26 +1388,24 @@ static void addBlockPointerConversion(Sema &S,
S.Context.getCanonicalType(BlockPtrTy));
DeclarationNameLoc NameLoc;
NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc);
- CXXConversionDecl *Conversion
- = CXXConversionDecl::Create(S.Context, Class, Loc,
- DeclarationNameInfo(Name, Loc, NameLoc),
- ConvTy,
- S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
- /*isInline=*/true, /*isExplicit=*/false,
- /*isConstexpr=*/false,
- CallOperator->getBody()->getLocEnd());
+ CXXConversionDecl *Conversion = CXXConversionDecl::Create(
+ S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
+ /*isInline=*/true, /*isExplicit=*/false,
+ /*isConstexpr=*/false, CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
Class->addDecl(Conversion);
}
-static ExprResult performLambdaVarCaptureInitialization(Sema &S,
- const Capture &Capture,
- FieldDecl *Field) {
+static ExprResult performLambdaVarCaptureInitialization(
+ Sema &S, const Capture &Capture, FieldDecl *Field,
+ SourceLocation ImplicitCaptureLoc, bool IsImplicitCapture) {
assert(Capture.isVariableCapture() && "not a variable capture");
auto *Var = Capture.getVariable();
- SourceLocation Loc = Capture.getLocation();
+ SourceLocation Loc =
+ IsImplicitCapture ? ImplicitCaptureLoc : Capture.getLocation();
// C++11 [expr.prim.lambda]p21:
// When the lambda-expression is evaluated, the entities that
@@ -1442,7 +1436,7 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope) {
LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back());
ActOnFinishFunctionBody(LSI.CallOperator, Body);
- return BuildLambdaExpr(StartLoc, Body->getLocEnd(), &LSI);
+ return BuildLambdaExpr(StartLoc, Body->getEndLoc(), &LSI);
}
static LambdaCaptureDefault
@@ -1620,8 +1614,8 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
Var, From.getEllipsisLoc()));
Expr *Init = From.getInitExpr();
if (!Init) {
- auto InitResult =
- performLambdaVarCaptureInitialization(*this, From, *CurField);
+ auto InitResult = performLambdaVarCaptureInitialization(
+ *this, From, *CurField, CaptureDefaultLoc, IsImplicit);
if (InitResult.isInvalid())
return ExprError();
Init = InitResult.get();
@@ -1644,7 +1638,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// same parameter and return types as the closure type's function call
// operator.
// FIXME: Fix generic lambda to block conversions.
- if (getLangOpts().Blocks && getLangOpts().ObjC1 && !IsGenericLambda)
+ if (getLangOpts().Blocks && getLangOpts().ObjC && !IsGenericLambda)
addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
// Finalize the lambda class.
@@ -1730,7 +1724,7 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
/*NRVO=*/false),
CurrentLocation, Src);
if (!Init.isInvalid())
- Init = ActOnFinishFullExpr(Init.get());
+ Init = ActOnFinishFullExpr(Init.get(), /*DiscardedValue*/ false);
if (Init.isInvalid())
return ExprError();
@@ -1747,14 +1741,11 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SmallVector<ParmVarDecl *, 4> BlockParams;
for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
ParmVarDecl *From = CallOperator->getParamDecl(I);
- BlockParams.push_back(ParmVarDecl::Create(Context, Block,
- From->getLocStart(),
- From->getLocation(),
- From->getIdentifier(),
- From->getType(),
- From->getTypeSourceInfo(),
- From->getStorageClass(),
- /*DefaultArg=*/nullptr));
+ BlockParams.push_back(ParmVarDecl::Create(
+ Context, Block, From->getBeginLoc(), From->getLocation(),
+ From->getIdentifier(), From->getType(), From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ /*DefaultArg=*/nullptr));
}
Block->setParams(BlockParams);
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 0ab70e9dca37..effccc2f3d38 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -186,9 +186,7 @@ namespace {
list.push_back(UnqualUsingEntry(UD->getNominatedNamespace(), Common));
}
- void done() {
- llvm::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
- }
+ void done() { llvm::sort(list, UnqualUsingEntry::Comparator()); }
typedef ListTy::const_iterator const_iterator;
@@ -1392,23 +1390,25 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() {
return LookupModulesCache;
}
+/// Determine whether the module M is part of the current module from the
+/// perspective of a module-private visibility check.
+static bool isInCurrentModule(const Module *M, const LangOptions &LangOpts) {
+ // If M is the global module fragment of a module that we've not yet finished
+ // parsing, then it must be part of the current module.
+ return M->getTopLevelModuleName() == LangOpts.CurrentModule ||
+ (M->Kind == Module::GlobalModuleFragment && !M->Parent);
+}
+
bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
- for (Module *Merged : Context.getModulesWithMergedDefinition(Def))
+ for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
if (isModuleVisible(Merged))
return true;
return false;
}
bool Sema::hasMergedDefinitionInCurrentModule(NamedDecl *Def) {
- // FIXME: When not in local visibility mode, we can't tell the difference
- // between a declaration being visible because we merged a local copy of
- // the same declaration into it, and it being visible because its owning
- // module is visible.
- if (Def->getModuleOwnershipKind() == Decl::ModuleOwnershipKind::Visible &&
- getLangOpts().ModulesLocalVisibility)
- return true;
- for (Module *Merged : Context.getModulesWithMergedDefinition(Def))
- if (Merged->getTopLevelModuleName() == getLangOpts().CurrentModule)
+ for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
+ if (isInCurrentModule(Merged, getLangOpts()))
return true;
return false;
}
@@ -1428,8 +1428,6 @@ hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
if (!DefaultArg.isInherited() && Modules) {
auto *NonConstD = const_cast<ParmDecl*>(D);
Modules->push_back(S.getOwningModule(NonConstD));
- const auto &Merged = S.Context.getModulesWithMergedDefinition(NonConstD);
- Modules->insert(Modules->end(), Merged.begin(), Merged.end());
}
// If there was a previous default argument, maybe its parameter is visible.
@@ -1464,11 +1462,8 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
HasFilteredRedecls = true;
- if (Modules) {
+ if (Modules)
Modules->push_back(R->getOwningModule());
- const auto &Merged = S.Context.getModulesWithMergedDefinition(R);
- Modules->insert(Modules->end(), Merged.begin(), Merged.end());
- }
}
// Only return false if there is at least one redecl that is not filtered out.
@@ -1519,27 +1514,11 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
assert(D->isHidden() && "should not call this: not in slow case");
Module *DeclModule = SemaRef.getOwningModule(D);
- if (!DeclModule) {
- // A module-private declaration with no owning module means this is in the
- // global module in the C++ Modules TS. This is visible within the same
- // translation unit only.
- // FIXME: Don't assume that "same translation unit" means the same thing
- // as "not from an AST file".
- assert(D->isModulePrivate() && "hidden decl has no module");
- if (!D->isFromASTFile() || SemaRef.hasMergedDefinitionInCurrentModule(D))
- return true;
- } else {
- // If the owning module is visible, and the decl is not module private,
- // then the decl is visible too. (Module private is ignored within the same
- // top-level module.)
- if (D->isModulePrivate()
- ? DeclModule->getTopLevelModuleName() ==
- SemaRef.getLangOpts().CurrentModule ||
- SemaRef.hasMergedDefinitionInCurrentModule(D)
- : SemaRef.isModuleVisible(DeclModule) ||
- SemaRef.hasVisibleMergedDefinition(D))
- return true;
- }
+ assert(DeclModule && "hidden decl has no owning module");
+
+ // If the owning module is visible, the decl is visible.
+ if (SemaRef.isModuleVisible(DeclModule, D->isModulePrivate()))
+ return true;
// Determine whether a decl context is a file context for the purpose of
// visibility. This looks through some (export and linkage spec) transparent
@@ -1589,29 +1568,41 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
return VisibleWithinParent;
}
- // FIXME: All uses of DeclModule below this point should also check merged
- // modules.
- if (!DeclModule)
- return false;
+ return false;
+}
+
+bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
+ // The module might be ordinarily visible. For a module-private query, that
+ // means it is part of the current module. For any other query, that means it
+ // is in our visible module set.
+ if (ModulePrivate) {
+ if (isInCurrentModule(M, getLangOpts()))
+ return true;
+ } else {
+ if (VisibleModules.isVisible(M))
+ return true;
+ }
+
+ // Otherwise, it might be visible by virtue of the query being within a
+ // template instantiation or similar that is permitted to look inside M.
// Find the extra places where we need to look.
- const auto &LookupModules = SemaRef.getLookupModules();
+ const auto &LookupModules = getLookupModules();
if (LookupModules.empty())
return false;
- // If our lookup set contains the decl's module, it's visible.
- if (LookupModules.count(DeclModule))
+ // If our lookup set contains the module, it's visible.
+ if (LookupModules.count(M))
return true;
- // If the declaration isn't exported, it's not visible in any other module.
- if (D->isModulePrivate())
+ // For a module-private query, that's everywhere we get to look.
+ if (ModulePrivate)
return false;
- // Check whether DeclModule is transitively exported to an import of
- // the lookup set.
- return std::any_of(LookupModules.begin(), LookupModules.end(),
- [&](const Module *M) {
- return M->isModuleVisible(DeclModule); });
+ // Check whether M is transitively exported to an import of the lookup set.
+ return llvm::any_of(LookupModules, [&](const Module *LookupM) {
+ return LookupM->isModuleVisible(M);
+ });
}
bool Sema::isVisibleSlow(const NamedDecl *D) {
@@ -3346,38 +3337,29 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
!isa<FunctionTemplateDecl>(Underlying))
continue;
- if (!isVisible(D)) {
- D = findAcceptableDecl(
- *this, D, (Decl::IDNS_Ordinary | Decl::IDNS_OrdinaryFriend));
- if (!D)
- continue;
- if (auto *USD = dyn_cast<UsingShadowDecl>(D))
- Underlying = USD->getTargetDecl();
- }
-
- // If the only declaration here is an ordinary friend, consider
- // it only if it was declared in an associated classes.
- if ((D->getIdentifierNamespace() & Decl::IDNS_Ordinary) == 0) {
- // If it's neither ordinarily visible nor a friend, we can't find it.
- if ((D->getIdentifierNamespace() & Decl::IDNS_OrdinaryFriend) == 0)
- continue;
-
- bool DeclaredInAssociatedClass = false;
- for (Decl *DI = D; DI; DI = DI->getPreviousDecl()) {
- DeclContext *LexDC = DI->getLexicalDeclContext();
- if (isa<CXXRecordDecl>(LexDC) &&
- AssociatedClasses.count(cast<CXXRecordDecl>(LexDC)) &&
- isVisible(cast<NamedDecl>(DI))) {
- DeclaredInAssociatedClass = true;
+ // The declaration is visible to argument-dependent lookup if either
+ // it's ordinarily visible or declared as a friend in an associated
+ // class.
+ bool Visible = false;
+ for (D = D->getMostRecentDecl(); D;
+ D = cast_or_null<NamedDecl>(D->getPreviousDecl())) {
+ if (D->getIdentifierNamespace() & Decl::IDNS_Ordinary) {
+ if (isVisible(D)) {
+ Visible = true;
+ break;
+ }
+ } else if (D->getFriendObjectKind()) {
+ auto *RD = cast<CXXRecordDecl>(D->getLexicalDeclContext());
+ if (AssociatedClasses.count(RD) && isVisible(D)) {
+ Visible = true;
break;
}
}
- if (!DeclaredInAssociatedClass)
- continue;
}
// FIXME: Preserve D as the FoundDecl.
- Result.insert(Underlying);
+ if (Visible)
+ Result.insert(Underlying);
}
}
}
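Editor's note: the friend-in-associated-class path accepted by the rewritten loop corresponds to this well-known pattern (hedged C++ sketch):

    namespace N {
      struct S {
        friend void poke(S) {}   // not found by ordinary lookup, only by ADL
      };
    }

    int main() {
      poke(N::S{});   // OK: N::S is an associated class, so the hidden friend is visible to ADL
      return 0;
    }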
@@ -3628,8 +3610,9 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
// Find results in this base class (and its bases).
ShadowContextRAII Shadow(Visited);
- LookupVisibleDecls(RD, Result, QualifiedNameLookup, true, Consumer,
- Visited, IncludeDependentBases, LoadExternal);
+ LookupVisibleDecls(RD, Result, QualifiedNameLookup, /*InBaseClass=*/true,
+ Consumer, Visited, IncludeDependentBases,
+ LoadExternal);
}
}
@@ -3998,9 +3981,9 @@ void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
// Compute an upper bound on the allowable edit distance, so that the
// edit-distance algorithm can short-circuit.
- unsigned UpperBound = (TypoStr.size() + 2) / 3 + 1;
+ unsigned UpperBound = (TypoStr.size() + 2) / 3;
unsigned ED = TypoStr.edit_distance(Name, true, UpperBound);
- if (ED >= UpperBound) return;
+ if (ED > UpperBound) return;
TypoCorrection TC(&SemaRef.Context.Idents.get(Name), ND, NNS, ED);
if (isKeyword) TC.makeKeyword();
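Editor's note: the tightened bound keeps the same acceptance set as before (corrections within roughly a third of the typo's length) while passing a smaller short-circuit limit to edit_distance; a small arithmetic sketch:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      for (unsigned Len : {3u, 7u, 12u}) {
        unsigned UpperBound = (Len + 2) / 3;  // new bound; old code used this value + 1 with '>='
        std::printf("typo length %u: corrections up to %u edits are considered\n",
                    Len, UpperBound);
      }
      return 0;
    }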
@@ -4070,7 +4053,7 @@ void TypoCorrectionConsumer::addNamespaces(
}
// Do not transform this into an iterator-based loop. The loop body can
// trigger the creation of further types (through lazy deserialization) and
- // invalide iterators into this list.
+ // invalidate iterators into this list.
auto &Types = SemaRef.getASTContext().getTypes();
for (unsigned I = 0; I != Types.size(); ++I) {
const auto *TI = Types[I];
@@ -4211,7 +4194,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() {
SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
OldOStream << Typo->getName();
// If correction candidate would be an identical written qualified
- // identifer, then the existing CXXScopeSpec probably included a
+ // identifier, then the existing CXXScopeSpec probably included a
// typedef that didn't get accounted for properly.
if (OldOStream.str() == NewQualified)
break;
@@ -4628,7 +4611,7 @@ std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer(
getLangOpts().ModulesSearchAll) {
// The following has the side effect of loading the missing module.
getModuleLoader().lookupMissingImports(Typo->getName(),
- TypoName.getLocStart());
+ TypoName.getBeginLoc());
}
CorrectionCandidateCallback &CCCRef = *CCC;
@@ -5061,12 +5044,12 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
if (!Def)
Def = Decl;
- Module *Owner = getOwningModule(Decl);
+ Module *Owner = getOwningModule(Def);
assert(Owner && "definition of hidden declaration is not in a module");
llvm::SmallVector<Module*, 8> OwningModules;
OwningModules.push_back(Owner);
- auto Merged = Context.getModulesWithMergedDefinition(Decl);
+ auto Merged = Context.getModulesWithMergedDefinition(Def);
OwningModules.insert(OwningModules.end(), Merged.begin(), Merged.end());
diagnoseMissingImport(Loc, Decl, Decl->getLocation(), OwningModules, MIK,
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index 3e55cf003fce..9412d0160048 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -609,12 +609,12 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
}
if (T->isObjCObjectType()) {
- SourceLocation StarLoc = TInfo->getTypeLoc().getLocEnd();
+ SourceLocation StarLoc = TInfo->getTypeLoc().getEndLoc();
StarLoc = getLocForEndOfToken(StarLoc);
Diag(FD.D.getIdentifierLoc(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(StarLoc, "*");
T = Context.getObjCObjectPointerType(T);
- SourceLocation TLoc = TInfo->getTypeLoc().getLocStart();
+ SourceLocation TLoc = TInfo->getTypeLoc().getBeginLoc();
TInfo = Context.getTrivialTypeSourceInfo(T, TLoc);
}
@@ -1061,7 +1061,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PropertyIvarLoc = PropertyLoc;
SourceLocation PropertyDiagLoc = PropertyLoc;
if (PropertyDiagLoc.isInvalid())
- PropertyDiagLoc = ClassImpDecl->getLocStart();
+ PropertyDiagLoc = ClassImpDecl->getBeginLoc();
ObjCPropertyDecl *property = nullptr;
ObjCInterfaceDecl *IDecl = nullptr;
// Find the class or category class where this property must have
@@ -1412,9 +1412,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// FIXME. Eventually we want to do this for Objective-C as well.
SynthesizedFunctionScope Scope(*this, getterMethod);
ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
- DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
- VK_LValue, PropertyDiagLoc);
+ DeclRefExpr *SelfExpr = new (Context)
+ DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ PropertyDiagLoc);
MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr =
ImplicitCastExpr::Create(Context, SelfDecl->getType(),
@@ -1464,9 +1464,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// FIXME. Eventually we want to do this for Objective-C as well.
SynthesizedFunctionScope Scope(*this, setterMethod);
ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
- DeclRefExpr *SelfExpr =
- new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
- VK_LValue, PropertyDiagLoc);
+ DeclRefExpr *SelfExpr = new (Context)
+ DeclRefExpr(Context, SelfDecl, false, SelfDecl->getType(), VK_LValue,
+ PropertyDiagLoc);
MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr =
ImplicitCastExpr::Create(Context, SelfDecl->getType(),
@@ -1481,8 +1481,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
ParmVarDecl *Param = (*P);
QualType T = Param->getType().getNonReferenceType();
- DeclRefExpr *rhs = new (Context) DeclRefExpr(Param, false, T,
- VK_LValue, PropertyDiagLoc);
+ DeclRefExpr *rhs = new (Context)
+ DeclRefExpr(Context, Param, false, T, VK_LValue, PropertyDiagLoc);
MarkDeclRefReferenced(rhs);
ExprResult Res = BuildBinOp(S, PropertyDiagLoc,
BO_Assign, lhs, rhs);
@@ -1497,8 +1497,8 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
Diag(PropertyDiagLoc,
diag::err_atomic_property_nontrivial_assign_op)
<< property->getType();
- Diag(FuncDecl->getLocStart(),
- diag::note_callee_decl) << FuncDecl;
+ Diag(FuncDecl->getBeginLoc(), diag::note_callee_decl)
+ << FuncDecl;
}
}
PIDecl->setSetterCXXAssignment(Res.getAs<Expr>());
@@ -2100,7 +2100,7 @@ void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl)
!impDecl->getInstanceMethod(getterMethod->getSelector())) {
SourceLocation loc = propertyImpl->getLocation();
if (loc.isInvalid())
- loc = impDecl->getLocStart();
+ loc = impDecl->getBeginLoc();
Diag(loc, diag::warn_null_resettable_setter)
<< setterMethod->getSelector() << property->getDeclName();
@@ -2235,7 +2235,7 @@ void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D
if (getterRedecl->getDeclContext() != PD->getDeclContext())
continue;
noteLoc = getterRedecl->getLocation();
- fixItLoc = getterRedecl->getLocEnd();
+ fixItLoc = getterRedecl->getEndLoc();
}
Preprocessor &PP = getPreprocessor();
@@ -2384,7 +2384,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
QualType modifiedTy = resultTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)) {
if (*nullability == NullabilityKind::Unspecified)
- resultTy = Context.getAttributedType(AttributedType::attr_nonnull,
+ resultTy = Context.getAttributedType(attr::TypeNonNull,
modifiedTy, modifiedTy);
}
}
@@ -2458,7 +2458,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
QualType modifiedTy = paramTy;
if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)){
if (*nullability == NullabilityKind::Unspecified)
- paramTy = Context.getAttributedType(AttributedType::attr_nullable,
+ paramTy = Context.getAttributedType(attr::TypeNullable,
modifiedTy, modifiedTy);
}
}
@@ -2557,6 +2557,14 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
PropertyDecl->setInvalidDecl();
}
+ // Check for assign on object types.
+ if ((Attributes & ObjCDeclSpec::DQ_PR_assign) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
+ PropertyTy->isObjCRetainableType() &&
+ !PropertyTy->isObjCARCImplicitlyUnretainedType()) {
+ Diag(Loc, diag::warn_objc_property_assign_on_object);
+ }
+
// Check for more than one of { assign, copy, retain }.
if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index e1a4c420d402..36048a38b999 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
@@ -73,6 +74,8 @@ public:
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
+ using DoacrossDependMapTy =
+ llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
private:
struct DSAInfo {
@@ -97,8 +100,6 @@ private:
llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
using CriticalsWithHintsTy =
llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
- using DoacrossDependMapTy =
- llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
struct ReductionData {
using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
SourceRange ReductionRange;
@@ -137,13 +138,16 @@ private:
/// first argument (Expr *) contains optional argument of the
/// 'ordered' clause, the second one is true if the regions has 'ordered'
/// clause, false otherwise.
- llvm::PointerIntPair<const Expr *, 1, bool> OrderedRegion;
+ llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
+ unsigned AssociatedLoops = 1;
+ const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
bool CancelRegion = false;
- unsigned AssociatedLoops = 1;
+ bool LoopStart = false;
SourceLocation InnerTeamsRegionLoc;
/// Reference to the taskgroup task_reduction reference expression.
Expr *TaskgroupReductionRef = nullptr;
+ llvm::DenseSet<QualType> MappedClassesQualTypes;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -162,6 +166,9 @@ private:
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
Sema &SemaRef;
bool ForceCapturing = false;
+ /// true if all the variables in the target executable directives must be
+ /// captured by reference.
+ bool ForceCaptureByReferenceInTargetExecutable = false;
CriticalsWithHintsTy Criticals;
using iterator = StackTy::const_reverse_iterator;
@@ -177,6 +184,9 @@ private:
Stack.back().first.empty();
}
+ /// Vector of previously declared requires directives
+ SmallVector<const OMPRequiresDecl *, 2> RequiresDecls;
+
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
@@ -190,6 +200,13 @@ public:
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
+ void setForceCaptureByReferenceInTargetExecutable(bool V) {
+ ForceCaptureByReferenceInTargetExecutable = V;
+ }
+ bool isForceCaptureByReferenceInTargetExecutable() const {
+ return ForceCaptureByReferenceInTargetExecutable;
+ }
+
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
if (Stack.empty() ||
@@ -205,6 +222,33 @@ public:
Stack.back().first.pop_back();
}
+ /// Marks that we've started loop parsing.
+ void loopInit() {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ Stack.back().first.back().LoopStart = true;
+ }
+ /// Start capturing the variables in the loop context.
+ void loopStart() {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ Stack.back().first.back().LoopStart = false;
+ }
+ /// Returns true if variables are captured, false otherwise.
+ bool isLoopStarted() const {
+ assert(isOpenMPLoopDirective(getCurrentDirective()) &&
+ "Expected loop-based directive.");
+ return !Stack.back().first.back().LoopStart;
+ }
+ /// Marks (or clears) the declaration as a possible loop counter.
+ void resetPossibleLoopCounter(const Decl *D = nullptr) {
+ Stack.back().first.back().PossiblyLoopCounter =
+ D ? D->getCanonicalDecl() : D;
+ }
+ /// Gets the possible loop counter decl.
+ const Decl *getPossiblyLoopCunter() const {
+ return Stack.back().first.back().PossiblyLoopCounter;
+ }
/// Start new OpenMP region stack in new non-capturing function.
void pushFunction() {
const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
@@ -352,6 +396,33 @@ public:
return std::next(Stack.back().first.rbegin())->Directive;
}
+ /// Add requires decl to internal vector
+ void addRequiresDecl(OMPRequiresDecl *RD) {
+ RequiresDecls.push_back(RD);
+ }
+
+ /// Checks for a duplicate clause amongst previously declared requires
+ /// directives
+ bool hasDuplicateRequiresClause(ArrayRef<OMPClause *> ClauseList) const {
+ bool IsDuplicate = false;
+ for (OMPClause *CNew : ClauseList) {
+ for (const OMPRequiresDecl *D : RequiresDecls) {
+ for (const OMPClause *CPrev : D->clauselists()) {
+ if (CNew->getClauseKind() == CPrev->getClauseKind()) {
+ SemaRef.Diag(CNew->getBeginLoc(),
+ diag::err_omp_requires_clause_redeclaration)
+ << getOpenMPClauseName(CNew->getClauseKind());
+ SemaRef.Diag(CPrev->getBeginLoc(),
+ diag::note_omp_requires_previous_clause)
+ << getOpenMPClauseName(CPrev->getClauseKind());
+ IsDuplicate = true;
+ }
+ }
+ }
+ }
+ return IsDuplicate;
+ }
+
/// Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
assert(!isStackEmpty());
@@ -398,23 +469,42 @@ public:
}
/// Marks current region as ordered (it has an 'ordered' clause).
- void setOrderedRegion(bool IsOrdered, const Expr *Param) {
+ void setOrderedRegion(bool IsOrdered, const Expr *Param,
+ OMPOrderedClause *Clause) {
assert(!isStackEmpty());
- Stack.back().first.back().OrderedRegion.setInt(IsOrdered);
- Stack.back().first.back().OrderedRegion.setPointer(Param);
+ if (IsOrdered)
+ Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
+ else
+ Stack.back().first.back().OrderedRegion.reset();
+ }
+ /// Returns true if the region is ordered (has an associated 'ordered'
+ /// clause), false otherwise.
+ bool isOrderedRegion() const {
+ if (isStackEmpty())
+ return false;
+ return Stack.back().first.rbegin()->OrderedRegion.hasValue();
+ }
+ /// Returns optional parameter for the ordered region.
+ std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
+ if (isStackEmpty() ||
+ !Stack.back().first.rbegin()->OrderedRegion.hasValue())
+ return std::make_pair(nullptr, nullptr);
+ return Stack.back().first.rbegin()->OrderedRegion.getValue();
}
/// Returns true, if parent region is ordered (has associated
/// 'ordered' clause), false - otherwise.
bool isParentOrderedRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
- return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt();
+ return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
}
/// Returns optional parameter for the ordered region.
- const Expr *getParentOrderedRegionParam() const {
- if (isStackEmpty() || Stack.back().first.size() == 1)
- return nullptr;
- return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer();
+ std::pair<const Expr *, OMPOrderedClause *>
+ getParentOrderedRegionParam() const {
+ if (isStackEmpty() || Stack.back().first.size() == 1 ||
+ !std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
+ return std::make_pair(nullptr, nullptr);
+ return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
}
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
@@ -572,17 +662,34 @@ public:
return llvm::make_range(StackElem.DoacrossDepends.end(),
StackElem.DoacrossDepends.end());
}
+
+ // Store types of classes which have been explicitly mapped
+ void addMappedClassesQualTypes(QualType QT) {
+ SharingMapTy &StackElem = Stack.back().first.back();
+ StackElem.MappedClassesQualTypes.insert(QT);
+ }
+
+ // Return true if the given class type has been explicitly mapped before
+ bool isClassPreviouslyMapped(QualType QT) const {
+ const SharingMapTy &StackElem = Stack.back().first.back();
+ return StackElem.MappedClassesQualTypes.count(QT) != 0;
+ }
+
};
-bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
- return isOpenMPParallelDirective(DKind) || isOpenMPTaskingDirective(DKind) ||
- isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown;
+
+bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
+ return isOpenMPParallelDirective(DKind) || isOpenMPTeamsDirective(DKind);
+}
+
+bool isImplicitOrExplicitTaskingRegion(OpenMPDirectiveKind DKind) {
+ return isImplicitTaskingRegion(DKind) || isOpenMPTaskingDirective(DKind) || DKind == OMPD_unknown;
}
} // namespace
static const Expr *getExprAsWritten(const Expr *E) {
- if (const auto *ExprTemp = dyn_cast<ExprWithCleanups>(E))
- E = ExprTemp->getSubExpr();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
E = MTE->GetTemporaryExpr();
@@ -716,7 +823,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(iterator &Iter,
DVar.CKind = OMPC_firstprivate;
return DVar;
}
- } while (I != E && !isParallelOrTaskRegion(I->Directive));
+ } while (I != E && !isImplicitTaskingRegion(I->Directive));
DVar.CKind =
(DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
return DVar;
@@ -963,7 +1070,7 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
if (!isStackEmpty()) {
iterator I = Iter, E = Stack.back().first.rend();
Scope *TopScope = nullptr;
- while (I != E && !isParallelOrTaskRegion(I->Directive) &&
+ while (I != E && !isImplicitOrExplicitTaskingRegion(I->Directive) &&
!isOpenMPTargetExecutionDirective(I->Directive))
++I;
if (I == E)
@@ -977,6 +1084,51 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, iterator Iter) const {
return false;
}
+static bool isConstNotMutableType(Sema &SemaRef, QualType Type,
+ bool AcceptIfMutable = true,
+ bool *IsClassType = nullptr) {
+ ASTContext &Context = SemaRef.getASTContext();
+ Type = Type.getNonReferenceType().getCanonicalType();
+ bool IsConstant = Type.isConstant(Context);
+ Type = Context.getBaseElementType(Type);
+ const CXXRecordDecl *RD = AcceptIfMutable && SemaRef.getLangOpts().CPlusPlus
+ ? Type->getAsCXXRecordDecl()
+ : nullptr;
+ if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
+ if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
+ RD = CTD->getTemplatedDecl();
+ if (IsClassType)
+ *IsClassType = RD;
+ return IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD &&
+ RD->hasDefinition() && RD->hasMutableFields());
+}
+
+static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
+ QualType Type, OpenMPClauseKind CKind,
+ SourceLocation ELoc,
+ bool AcceptIfMutable = true,
+ bool ListItemNotVar = false) {
+ ASTContext &Context = SemaRef.getASTContext();
+ bool IsClassType;
+ if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
+ unsigned Diag = ListItemNotVar
+ ? diag::err_omp_const_list_item
+ : IsClassType ? diag::err_omp_const_not_mutable_variable
+ : diag::err_omp_const_variable;
+ SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
+ if (!ListItemNotVar && D) {
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
+ SemaRef.Diag(D->getLocation(),
+ IsDecl ? diag::note_previous_decl : diag::note_defined_here)
+ << D;
+ }
+ return true;
+ }
+ return false;
+}
+
const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
bool FromParent) {
D = getCanonicalDecl(D);
@@ -1074,31 +1226,28 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
return DVar;
}
- QualType Type = D->getType().getNonReferenceType().getCanonicalType();
- bool IsConstant = Type.isConstant(SemaRef.getASTContext());
- Type = SemaRef.getASTContext().getBaseElementType(Type);
- // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct, C/C++, predetermined, p.6]
- // Variables with const qualified type having no mutable member are
- // shared.
- const CXXRecordDecl *RD =
- SemaRef.getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr;
- if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(RD))
- if (const ClassTemplateDecl *CTD = CTSD->getSpecializedTemplate())
- RD = CTD->getTemplatedDecl();
- if (IsConstant &&
- !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasDefinition() &&
- RD->hasMutableFields())) {
- // Variables with const-qualified type having no mutable member may be
- // listed in a firstprivate clause, even if they are static data members.
- DSAVarData DVarTemp =
- hasDSA(D, [](OpenMPClauseKind C) { return C == OMPC_firstprivate; },
- MatchesAlways, FromParent);
- if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr)
- return DVarTemp;
+ // The predetermined shared attribute for const-qualified types having no
+ // mutable members was removed after OpenMP 3.1.
+ if (SemaRef.LangOpts.OpenMP <= 31) {
+ // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct, C/C++, predetermined, p.6]
+ // Variables with const qualified type having no mutable member are
+ // shared.
+ if (isConstNotMutableType(SemaRef, D->getType())) {
+ // Variables with const-qualified type having no mutable member may be
+ // listed in a firstprivate clause, even if they are static data members.
+ DSAVarData DVarTemp = hasInnermostDSA(
+ D,
+ [](OpenMPClauseKind C) {
+ return C == OMPC_firstprivate || C == OMPC_shared;
+ },
+ MatchesAlways, FromParent);
+ if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr)
+ return DVarTemp;
- DVar.CKind = OMPC_shared;
- return DVar;
+ DVar.CKind = OMPC_shared;
+ return DVar;
+ }
}
// Explicitly specified attributes and local variables with predetermined
@@ -1147,7 +1296,7 @@ DSAStackTy::hasDSA(ValueDecl *D,
if (FromParent && I != EndI)
std::advance(I, 1);
for (; I != EndI; std::advance(I, 1)) {
- if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
+ if (!DPred(I->Directive) && !isImplicitOrExplicitTaskingRegion(I->Directive))
continue;
iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
@@ -1187,10 +1336,16 @@ bool DSAStackTy::hasExplicitDSA(
return false;
std::advance(StartI, Level);
auto I = StartI->SharingMap.find(D);
- return (I != StartI->SharingMap.end()) &&
+ if ((I != StartI->SharingMap.end()) &&
I->getSecond().RefExpr.getPointer() &&
CPred(I->getSecond().Attributes) &&
- (!NotLastprivate || !I->getSecond().RefExpr.getInt());
+ (!NotLastprivate || !I->getSecond().RefExpr.getInt()))
+ return true;
+ // Check predetermined rules for the loop control variables.
+ auto LI = StartI->LCVMap.find(D);
+ if (LI != StartI->LCVMap.end())
+ return CPred(OMPC_private);
+ return false;
}
bool DSAStackTy::hasExplicitDirective(
@@ -1239,17 +1394,6 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
-static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
-isDeclareTargetDeclaration(const ValueDecl *VD) {
- for (const Decl *D : VD->redecls()) {
- if (!D->hasAttrs())
- continue;
- if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
- return Attr->getMapType();
- }
- return llvm::None;
-}
-
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
@@ -1368,6 +1512,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
// By default, all the data that has a scalar type is mapped by copy
// (except for reduction variables).
IsByRef =
+ (DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
+ !Ty->isAnyPointerType()) ||
!Ty->isScalarType() ||
DSAStack->getDefaultDMAAtLevel(Level) == DMA_tofrom_scalar ||
DSAStack->hasExplicitDSA(
@@ -1377,10 +1523,12 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
if (IsByRef && Ty.getNonReferenceType()->isScalarType()) {
IsByRef =
- !DSAStack->hasExplicitDSA(
- D,
- [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
- Level, /*NotLastprivate=*/true) &&
+ ((DSAStack->isForceCaptureByReferenceInTargetExecutable() &&
+ !Ty->isAnyPointerType()) ||
+ !DSAStack->hasExplicitDSA(
+ D,
+ [](OpenMPClauseKind K) -> bool { return K == OMPC_firstprivate; },
+ Level, /*NotLastprivate=*/true)) &&
// If the variable is artificial and must be captured by value - try to
// capture by value.
!(isa<OMPCapturedExprDecl>(D) && !D->hasAttr<OMPCaptureNoInitAttr>() &&
@@ -1417,7 +1565,7 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
-VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
+VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
D = getCanonicalDecl(D);
@@ -1425,13 +1573,65 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
// 'target' we return true so that this global is also mapped to the device.
//
auto *VD = dyn_cast<VarDecl>(D);
- if (VD && !VD->hasLocalStorage() && isInOpenMPTargetExecutionDirective()) {
- // If the declaration is enclosed in a 'declare target' directive,
- // then it should not be captured.
- //
- if (isDeclareTargetDeclaration(VD))
+ if (VD && !VD->hasLocalStorage()) {
+ if (isInOpenMPDeclareTargetContext() &&
+ (getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
+ // Try to mark variable as declare target if it is used in capturing
+ // regions.
+ if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
return nullptr;
- return VD;
+ } else if (isInOpenMPTargetExecutionDirective()) {
+ // If the declaration is enclosed in a 'declare target' directive,
+ // then it should not be captured.
+ //
+ if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ return nullptr;
+ return VD;
+ }
+ }
+ // Capture variables captured by reference in lambdas for target-based
+ // directives.
+ if (VD && !DSAStack->isClauseParsingMode()) {
+ if (const auto *RD = VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl()) {
+ bool SavedForceCaptureByReferenceInTargetExecutable =
+ DSAStack->isForceCaptureByReferenceInTargetExecutable();
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(/*V=*/true);
+ if (RD->isLambda()) {
+ llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ FieldDecl *ThisCapture;
+ RD->getCaptureFields(Captures, ThisCapture);
+ for (const LambdaCapture &LC : RD->captures()) {
+ if (LC.getCaptureKind() == LCK_ByRef) {
+ VarDecl *VD = LC.getCapturedVar();
+ DeclContext *VDC = VD->getDeclContext();
+ if (!VDC->Encloses(CurContext))
+ continue;
+ DSAStackTy::DSAVarData DVarPrivate =
+ DSAStack->getTopDSA(VD, /*FromParent=*/false);
+ // Do not capture already captured variables.
+ if (!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
+ DVarPrivate.CKind == OMPC_unknown &&
+ !DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [](OMPClauseMappableExprCommon::
+ MappableExprComponentListRef,
+ OpenMPClauseKind) { return true; }))
+ MarkVariableReferenced(LC.getLocation(), LC.getCapturedVar());
+ } else if (LC.getCaptureKind() == LCK_This) {
+ QualType ThisTy = getCurrentThisType();
+ if (!ThisTy.isNull() &&
+ Context.typesAreCompatible(ThisTy, ThisCapture->getType()))
+ CheckCXXThisCapture(LC.getLocation());
+ }
+ }
+ }
+ DSAStack->setForceCaptureByReferenceInTargetExecutable(
+ SavedForceCaptureByReferenceInTargetExecutable);
+ }
}
if (DSAStack->getCurrentDirective() != OMPD_unknown &&
@@ -1440,7 +1640,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
auto &&Info = DSAStack->isLoopControlVariable(D);
if (Info.first ||
(VD && VD->hasLocalStorage() &&
- isParallelOrTaskRegion(DSAStack->getCurrentDirective())) ||
+ isImplicitOrExplicitTaskingRegion(DSAStack->getCurrentDirective())) ||
(VD && DSAStack->isForceVarCapturing()))
return VD ? VD : Info.second;
DSAStackTy::DSAVarData DVarPrivate =
@@ -1463,8 +1663,28 @@ void Sema::adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
FunctionScopesIndex -= Regions.size();
}
+void Sema::startOpenMPLoop() {
+ assert(LangOpts.OpenMP && "OpenMP must be enabled.");
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()))
+ DSAStack->loopInit();
+}
+
bool Sema::isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
+ if (DSAStack->getAssociatedLoops() > 0 &&
+ !DSAStack->isLoopStarted()) {
+ DSAStack->resetPossibleLoopCounter(D);
+ DSAStack->loopStart();
+ return true;
+ }
+ if ((DSAStack->getPossiblyLoopCunter() == D->getCanonicalDecl() ||
+ DSAStack->isLoopControlVariable(D).first) &&
+ !DSAStack->hasExplicitDSA(
+ D, [](OpenMPClauseKind K) { return K != OMPC_private; }, Level) &&
+ !isOpenMPSimdDirective(DSAStack->getCurrentDirective()))
+ return true;
+ }
return DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K) { return K == OMPC_private; }, Level) ||
(DSAStack->isClauseParsingMode() &&
@@ -1780,7 +2000,7 @@ public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
if (const auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
- SemaRef.Diag(E->getLocStart(),
+ SemaRef.Diag(E->getBeginLoc(),
diag::err_omp_local_var_in_threadprivate_init)
<< E->getSourceRange();
SemaRef.Diag(VD->getLocation(), diag::note_defined_here)
@@ -1882,6 +2102,30 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
return D;
}
+Sema::DeclGroupPtrTy
+Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
+ ArrayRef<OMPClause *> ClauseList) {
+ OMPRequiresDecl *D = nullptr;
+ if (!CurContext->isFileContext()) {
+ Diag(Loc, diag::err_omp_invalid_scope) << "requires";
+ } else {
+ D = CheckOMPRequiresDecl(Loc, ClauseList);
+ if (D) {
+ CurContext->addDecl(D);
+ DSAStack->addRequiresDecl(D);
+ }
+ }
+ return DeclGroupPtrTy::make(DeclGroupRef(D));
+}
+
+OMPRequiresDecl *Sema::CheckOMPRequiresDecl(SourceLocation Loc,
+ ArrayRef<OMPClause *> ClauseList) {
+ if (!DSAStack->hasDuplicateRequiresClause(ClauseList))
+ return OMPRequiresDecl::Create(Context, getCurLexicalContext(), Loc,
+ ClauseList);
+ return nullptr;
+}
+
static void reportOriginalDsa(Sema &SemaRef, const DSAStackTy *Stack,
const ValueDecl *D,
const DSAStackTy::DSAVarData &DVar,
@@ -1950,6 +2194,30 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
Sema::VarsWithInheritedDSAType VarsWithInheritedDSA;
llvm::SmallDenseSet<const ValueDecl *, 4> ImplicitDeclarations;
+ void VisitSubCaptures(OMPExecutableDirective *S) {
+ // Check implicitly captured variables.
+ if (!S->hasAssociatedStmt() || !S->getAssociatedStmt())
+ return;
+ for (const CapturedStmt::Capture &Cap :
+ S->getInnermostCapturedStmt()->captures()) {
+ if (!Cap.capturesVariable())
+ continue;
+ VarDecl *VD = Cap.getCapturedVar();
+ // Do not try to map the variable if it or its sub-component was mapped
+ // already.
+ if (isOpenMPTargetExecutionDirective(Stack->getCurrentDirective()) &&
+ Stack->checkMappableExprComponentListsForDecl(
+ VD, /*CurrentRegionOnly=*/true,
+ [](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind) { return true; }))
+ continue;
+ DeclRefExpr *DRE = buildDeclRefExpr(
+ SemaRef, VD, VD->getType().getNonLValueExprType(SemaRef.Context),
+ Cap.getLocation(), /*RefersToCapture=*/true);
+ Visit(DRE);
+ }
+ }
+
public:
void VisitDeclRefExpr(DeclRefExpr *E) {
if (E->isTypeDependent() || E->isValueDependent() ||
@@ -1968,7 +2236,7 @@ public:
// Skip internally declared static variables.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- isDeclareTargetDeclaration(VD);
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
(!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
return;
@@ -1980,7 +2248,7 @@ public:
// attribute, must have its data-sharing attribute explicitly determined
// by being listed in a data-sharing attribute clause.
if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
- isParallelOrTaskRegion(DKind) &&
+ isImplicitOrExplicitTaskingRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
VarsWithInheritedDSA[VD] = E;
return;
@@ -2057,7 +2325,7 @@ public:
return;
auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
OpenMPDirectiveKind DKind = Stack->getCurrentDirective();
- if (isa<CXXThisExpr>(E->getBase()->IgnoreParens())) {
+ if (auto *TE = dyn_cast<CXXThisExpr>(E->getBase()->IgnoreParens())) {
if (!FD)
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(FD, /*FromParent=*/false);
@@ -2084,6 +2352,12 @@ public:
//
if (FD->isBitField())
return;
+
+ // Check to see if the member expression is referencing a class that
+ // has already been explicitly mapped
+ if (Stack->isClassPreviouslyMapped(TE->getType()))
+ return;
+
ImplicitMap.emplace_back(E);
return;
}
@@ -2110,8 +2384,14 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(FD, /*FromParent=*/false);
if (isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared &&
- !Stack->isLoopControlVariable(FD).first)
- ImplicitFirstprivate.push_back(E);
+ !Stack->isLoopControlVariable(FD).first) {
+ // Check if there is a captured expression for the current field in the
+ // region. Do not mark it as firstprivate unless there is no captured
+ // expression.
+ // TODO: try to make it firstprivate.
+ if (DVar.CKind != OMPC_unknown)
+ ImplicitFirstprivate.push_back(E);
+ }
return;
}
if (isOpenMPTargetExecutionDirective(DKind)) {
@@ -2171,11 +2451,16 @@ public:
}
}
}
+ // Check implicitly captured variables.
+ VisitSubCaptures(S);
}
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
- if (C && !isa<OMPExecutableDirective>(C))
+ if (C) {
+ // Check implicitly captured variables in the task-based directives to
+ // see if they must be firstprivatized.
Visit(C);
+ }
}
}
@@ -2527,6 +2812,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_declare_simd:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_requires:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -2560,7 +2846,7 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
WithInit = true;
}
auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
- CaptureExpr->getLocStart());
+ CaptureExpr->getBeginLoc());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
S.CurContext->addHiddenDecl(CED);
@@ -2697,20 +2983,20 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
? SC->getFirstScheduleModifierLoc()
: SC->getSecondScheduleModifierLoc(),
diag::err_omp_schedule_nonmonotonic_ordered)
- << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
ErrorFound = true;
}
if (!LCs.empty() && OC && OC->getNumForLoops()) {
for (const OMPLinearClause *C : LCs) {
- Diag(C->getLocStart(), diag::err_omp_linear_ordered)
- << SourceRange(OC->getLocStart(), OC->getLocEnd());
+ Diag(C->getBeginLoc(), diag::err_omp_linear_ordered)
+ << SourceRange(OC->getBeginLoc(), OC->getEndLoc());
}
ErrorFound = true;
}
if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
OC->getNumForLoops()) {
- Diag(OC->getLocStart(), diag::err_omp_ordered_simd)
+ Diag(OC->getBeginLoc(), diag::err_omp_ordered_simd)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
ErrorFound = true;
}
@@ -2812,11 +3098,13 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
}
return false;
}
- // Allow some constructs (except teams) to be orphaned (they could be
- // used in functions, called from OpenMP regions with the required
- // preconditions).
+ // Allow some constructs (except teams and cancellation constructs) to be
+ // orphaned (they could be used in functions, called from OpenMP regions
+ // with the required preconditions).
if (ParentRegion == OMPD_unknown &&
- !isOpenMPNestingTeamsDirective(CurrentRegion))
+ !isOpenMPNestingTeamsDirective(CurrentRegion) &&
+ CurrentRegion != OMPD_cancellation_point &&
+ CurrentRegion != OMPD_cancel)
return false;
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
@@ -2845,6 +3133,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
(CancelRegion == OMPD_sections &&
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
ParentRegion == OMPD_parallel_sections)));
+ OrphanSeen = ParentRegion == OMPD_unknown;
} else if (CurrentRegion == OMPD_master) {
// OpenMP [2.16, Nesting of Regions]
// A master region may not be closely nested inside a worksharing,
@@ -2989,7 +3278,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
// the directive.
OpenMPDirectiveKind CurNM = IC->getNameModifier();
if (FoundNameModifiers[CurNM]) {
- S.Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ S.Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(Kind) << getOpenMPClauseName(OMPC_if)
<< (CurNM != OMPD_unknown) << getOpenMPDirectiveName(CurNM);
ErrorFound = true;
@@ -3023,7 +3312,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
// all if clauses on the directive must include a directive-name-modifier.
if (FoundNameModifiers[OMPD_unknown] && NamedModifiersNumber > 0) {
if (NamedModifiersNumber == AllowedNameModifiers.size()) {
- S.Diag(FoundNameModifiers[OMPD_unknown]->getLocStart(),
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getBeginLoc(),
diag::err_omp_no_more_if_clause);
} else {
std::string Values;
@@ -3045,7 +3334,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
++AllowedCnt;
}
}
- S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getLocStart(),
+ S.Diag(FoundNameModifiers[OMPD_unknown]->getCondition()->getBeginLoc(),
diag::err_omp_unnamed_if_clause)
<< (TotalAllowedNum > 1) << Values;
}
@@ -3113,9 +3402,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
if (!ImplicitMaps.empty()) {
if (OMPClause *Implicit = ActOnOpenMPMapClause(
- OMPC_MAP_unknown, OMPC_MAP_tofrom, /*IsMapTypeImplicit=*/true,
- SourceLocation(), SourceLocation(), ImplicitMaps,
- SourceLocation(), SourceLocation(), SourceLocation())) {
+ llvm::None, llvm::None, OMPC_MAP_tofrom,
+ /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
+ ImplicitMaps, SourceLocation(), SourceLocation(),
+ SourceLocation())) {
ClausesWithImplicit.emplace_back(Implicit);
ErrorFound |=
cast<OMPMapClause>(Implicit)->varlist_size() != ImplicitMaps.size();
@@ -3368,6 +3658,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPD_threadprivate:
case OMPD_declare_reduction:
case OMPD_declare_simd:
+ case OMPD_requires:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -3687,7 +3978,8 @@ class OpenMPIterationSpaceChecker {
/// Var <= UB
/// UB > Var
/// UB >= Var
- bool TestIsLessOp = false;
+ /// This will have no value when the condition is !=
+ llvm::Optional<bool> TestIsLessOp;
/// This flag is true when condition is strict ( < or > ).
bool TestIsStrictOp = false;
/// This flag is true when step is subtracted on each iteration.
@@ -3736,6 +4028,13 @@ public:
Expr *buildCounterInit() const;
/// Build step of the counter be used for codegen.
Expr *buildCounterStep() const;
+ /// Build loop data with counter value for depend clauses in ordered
+ /// directives.
+ Expr *
+ buildOrderedLoopData(Scope *S, Expr *Counter,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
+ SourceLocation Loc, Expr *Inc = nullptr,
+ OverloadedOperatorKind OOK = OO_Amp);
/// Return true if any expression is dependent.
bool dependent() const;
@@ -3746,8 +4045,8 @@ private:
/// Helper to set loop counter variable and its initializer.
bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB);
/// Helper to set upper bound.
- bool setUB(Expr *NewUB, bool LessOp, bool StrictOp, SourceRange SR,
- SourceLocation SL);
+ bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
+ SourceRange SR, SourceLocation SL);
/// Helper to set loop increment.
bool setStep(Expr *NewStep, bool Subtract);
};
@@ -3782,15 +4081,17 @@ bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
return false;
}
-bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, bool LessOp, bool StrictOp,
- SourceRange SR, SourceLocation SL) {
+bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, llvm::Optional<bool> LessOp,
+ bool StrictOp, SourceRange SR,
+ SourceLocation SL) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
if (!NewUB)
return true;
UB = NewUB;
- TestIsLessOp = LessOp;
+ if (LessOp)
+ TestIsLessOp = LessOp;
TestIsStrictOp = StrictOp;
ConditionSrcRange = SR;
ConditionLoc = SL;
@@ -3804,7 +4105,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
return true;
if (!NewStep->isValueDependent()) {
// Check that the step is integer expression.
- SourceLocation StepLoc = NewStep->getLocStart();
+ SourceLocation StepLoc = NewStep->getBeginLoc();
ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(
StepLoc, getExprAsWritten(NewStep));
if (Val.isInvalid())
@@ -3830,18 +4131,23 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
bool IsConstPos =
IsConstant && Result.isSigned() && (Subtract == Result.isNegative());
bool IsConstZero = IsConstant && !Result.getBoolValue();
+
+ // != with increment is treated as <; != with decrement is treated as >
+ if (!TestIsLessOp.hasValue())
+ TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
if (UB && (IsConstZero ||
- (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
- : (IsConstPos || (IsUnsigned && !Subtract))))) {
+ (TestIsLessOp.getValue() ?
+ (IsConstNeg || (IsUnsigned && Subtract)) :
+ (IsConstPos || (IsUnsigned && !Subtract))))) {
SemaRef.Diag(NewStep->getExprLoc(),
diag::err_omp_loop_incr_not_compatible)
- << LCDecl << TestIsLessOp << NewStep->getSourceRange();
+ << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
SemaRef.Diag(ConditionLoc,
diag::note_omp_loop_cond_requres_compatible_incr)
- << TestIsLessOp << ConditionSrcRange;
+ << TestIsLessOp.getValue() << ConditionSrcRange;
return true;
}
- if (TestIsLessOp == Subtract) {
+ if (TestIsLessOp.getValue() == Subtract) {
NewStep =
SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
.get();
@@ -3897,10 +4203,15 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
if (Var->hasInit() && !Var->getType()->isReferenceType()) {
// Accept non-canonical init form here but emit ext. warning.
if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
- SemaRef.Diag(S->getLocStart(),
+ SemaRef.Diag(S->getBeginLoc(),
diag::ext_omp_loop_not_canonical_init)
<< S->getSourceRange();
- return setLCDeclAndLB(Var, nullptr, Var->getInit());
+ return setLCDeclAndLB(
+ Var,
+ buildDeclRefExpr(SemaRef, Var,
+ Var->getType().getNonReferenceType(),
+ DS->getBeginLoc()),
+ Var->getInit());
}
}
}
@@ -3924,7 +4235,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
if (EmitDiags) {
- SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init)
+ SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_init)
<< S->getSourceRange();
}
return true;
@@ -3964,7 +4275,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
return true;
}
S = getExprAsWritten(S);
- SourceLocation CondLoc = S->getLocStart();
+ SourceLocation CondLoc = S->getBeginLoc();
if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isRelationalOp()) {
if (getInitLCDecl(BO->getLHS()) == LCDecl)
@@ -3977,7 +4288,12 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
(BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
- }
+ } else if (BO->getOpcode() == BO_NE)
+ return setUB(getInitLCDecl(BO->getLHS()) == LCDecl ?
+ BO->getRHS() : BO->getLHS(),
+ /*LessOp=*/llvm::None,
+ /*StrictOp=*/true,
+ BO->getSourceRange(), BO->getOperatorLoc());
} else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getNumArgs() == 2) {
auto Op = CE->getOperator();
@@ -3995,6 +4311,14 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
CE->getOperatorLoc());
break;
+ case OO_ExclaimEqual:
+ return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ?
+ CE->getArg(1) : CE->getArg(0),
+ /*LessOp=*/llvm::None,
+ /*StrictOp=*/true,
+ CE->getSourceRange(),
+ CE->getOperatorLoc());
+ break;
default:
break;
}
@@ -4033,7 +4357,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetIncRHS(Expr *RHS) {
}
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
- SemaRef.Diag(RHS->getLocStart(), diag::err_omp_loop_not_canonical_incr)
+ SemaRef.Diag(RHS->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
<< RHS->getSourceRange() << LCDecl;
return true;
}
@@ -4066,7 +4390,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
if (UO->isIncrementDecrementOp() &&
getInitLCDecl(UO->getSubExpr()) == LCDecl)
return setStep(SemaRef
- .ActOnIntegerConstant(UO->getLocStart(),
+ .ActOnIntegerConstant(UO->getBeginLoc(),
(UO->isDecrementOp() ? -1 : 1))
.get(),
/*Subtract=*/false);
@@ -4091,7 +4415,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
if (getInitLCDecl(CE->getArg(0)) == LCDecl)
return setStep(SemaRef
.ActOnIntegerConstant(
- CE->getLocStart(),
+ CE->getBeginLoc(),
((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
.get(),
/*Subtract=*/false);
@@ -4111,7 +4435,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
}
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
- SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_incr)
+ SemaRef.Diag(S->getBeginLoc(), diag::err_omp_loop_not_canonical_incr)
<< S->getSourceRange() << LCDecl;
return true;
}
@@ -4143,8 +4467,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
- Expr *UBExpr = TestIsLessOp ? UB : LB;
- Expr *LBExpr = TestIsLessOp ? LB : UB;
+ Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
+ Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
if (!Upper || !Lower)
@@ -4155,7 +4479,7 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
// BuildBinOp already emitted error, this one is to point user to upper
// and lower bound, and to tell what is passed to 'operator-'.
- SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx)
+ SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
<< Upper->getSourceRange() << Lower->getSourceRange();
return nullptr;
}
@@ -4245,8 +4569,9 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
ExprResult CondExpr =
SemaRef.BuildBinOp(S, DefaultLoc,
- TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
- : (TestIsStrictOp ? BO_GT : BO_GE),
+ TestIsLessOp.getValue() ?
+ (TestIsStrictOp ? BO_LT : BO_LE) :
+ (TestIsStrictOp ? BO_GT : BO_GE),
NewLB.get(), NewUB.get());
if (CondExpr.isUsable()) {
if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
@@ -4262,7 +4587,8 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
/// Build reference expression to the counter be used for codegen.
DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
- llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, DSAStackTy &DSA) const {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
+ DSAStackTy &DSA) const {
auto *VD = dyn_cast<VarDecl>(LCDecl);
if (!VD) {
VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
@@ -4302,6 +4628,63 @@ Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
/// Build step of the counter be used for codegen.
Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
+Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
+ Scope *S, Expr *Counter,
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
+ Expr *Inc, OverloadedOperatorKind OOK) {
+ Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
+ if (!Cnt)
+ return nullptr;
+ if (Inc) {
+ assert((OOK == OO_Plus || OOK == OO_Minus) &&
+ "Expected only + or - operations for depend clauses.");
+ BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
+ Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
+ if (!Cnt)
+ return nullptr;
+ }
+ ExprResult Diff;
+ QualType VarType = LCDecl->getType().getNonReferenceType();
+ if (VarType->isIntegerType() || VarType->isPointerType() ||
+ SemaRef.getLangOpts().CPlusPlus) {
+ // Upper - Lower
+ Expr *Upper =
+ TestIsLessOp.getValue() ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
+ Expr *Lower =
+ TestIsLessOp.getValue() ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
+ if (!Upper || !Lower)
+ return nullptr;
+
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
+
+ if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
+ // BuildBinOp already emitted error, this one is to point user to upper
+ // and lower bound, and to tell what is passed to 'operator-'.
+ SemaRef.Diag(Upper->getBeginLoc(), diag::err_omp_loop_diff_cxx)
+ << Upper->getSourceRange() << Lower->getSourceRange();
+ return nullptr;
+ }
+ }
+
+ if (!Diff.isUsable())
+ return nullptr;
+
+ // Parentheses (for dumping/debugging purposes only).
+ Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ if (!NewStep.isUsable())
+ return nullptr;
+ // (Upper - Lower) / Step
+ Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
+ if (!Diff.isUsable())
+ return nullptr;
+
+ return Diff.get();
+}
+
/// Iteration space of a single for loop.
struct LoopIterationSpace final {
/// Condition of the loop.
@@ -4336,6 +4719,7 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
unsigned AssociatedLoops = DSAStack->getAssociatedLoops();
if (AssociatedLoops > 0 &&
isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
+ DSAStack->loopStart();
OpenMPIterationSpaceChecker ISC(*this, ForLoc);
if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
if (ValueDecl *D = ISC.getLoopDecl()) {
@@ -4350,6 +4734,15 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
}
}
DSAStack->addLoopControlVariable(D, VD);
+ const Decl *LD = DSAStack->getPossiblyLoopCunter();
+ if (LD != D->getCanonicalDecl()) {
+ DSAStack->resetPossibleLoopCounter();
+ if (auto *Var = dyn_cast_or_null<VarDecl>(LD))
+ MarkDeclarationsReferencedInExpr(
+ buildDeclRefExpr(*this, const_cast<VarDecl *>(Var),
+ Var->getType().getNonLValueExprType(Context),
+ ForLoc, /*RefersToCapture=*/true));
+ }
}
}
DSAStack->setAssociatedLoops(AssociatedLoops - 1);
@@ -4361,7 +4754,8 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
static bool checkOpenMPIterationSpace(
OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
- Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr,
+ unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
+ Expr *OrderedLoopCountExpr,
Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
LoopIterationSpace &ResultIterSpace,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
@@ -4369,11 +4763,11 @@ static bool checkOpenMPIterationSpace(
// for (init-expr; test-expr; incr-expr) structured-block
auto *For = dyn_cast_or_null<ForStmt>(S);
if (!For) {
- SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
+ SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
<< (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
- << getOpenMPDirectiveName(DKind) << NestedLoopCount
+ << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
<< (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
- if (NestedLoopCount > 1) {
+ if (TotalNestedLoopCount > 1) {
if (CollapseLoopCountExpr && OrderedLoopCountExpr)
SemaRef.Diag(DSA.getConstructLoc(),
diag::note_omp_collapse_ordered_expr)
@@ -4414,7 +4808,7 @@ static bool checkOpenMPIterationSpace(
if (!VarType->isDependentType() && !VarType->isIntegerType() &&
!VarType->isPointerType() &&
!(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) {
- SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_variable_type)
+ SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_variable_type)
<< SemaRef.getLangOpts().CPlusPlus;
HasErrors = true;
}
@@ -4452,7 +4846,7 @@ static bool checkOpenMPIterationSpace(
!isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
(DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) {
- SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa)
+ SemaRef.Diag(Init->getBeginLoc(), diag::err_omp_loop_var_dsa)
<< getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind)
<< getOpenMPClauseName(PredeterminedCKind);
if (DVar.RefExpr == nullptr)
@@ -4506,6 +4900,41 @@ static bool checkOpenMPIterationSpace(
ResultIterSpace.PrivateCounterVar == nullptr ||
ResultIterSpace.CounterInit == nullptr ||
ResultIterSpace.CounterStep == nullptr);
+ if (!HasErrors && DSA.isOrderedRegion()) {
+ if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
+ if (CurrentNestedLoopCount <
+ DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
+ DSA.getOrderedRegionParam().second->setLoopNumIterations(
+ CurrentNestedLoopCount, ResultIterSpace.NumIterations);
+ DSA.getOrderedRegionParam().second->setLoopCounter(
+ CurrentNestedLoopCount, ResultIterSpace.CounterVar);
+ }
+ }
+ for (auto &Pair : DSA.getDoacrossDependClauses()) {
+ if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
+ // Erroneous case - clause has some problems.
+ continue;
+ }
+ if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
+ Pair.second.size() <= CurrentNestedLoopCount) {
+ // Erroneous case - clause has some problems.
+ Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
+ continue;
+ }
+ Expr *CntValue;
+ if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
+ CntValue = ISC.buildOrderedLoopData(
+ DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
+ Pair.first->getDependencyLoc());
+ else
+ CntValue = ISC.buildOrderedLoopData(
+ DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
+ Pair.first->getDependencyLoc(),
+ Pair.second[CurrentNestedLoopCount].first,
+ Pair.second[CurrentNestedLoopCount].second);
+ Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
+ }
+ }
return HasErrors;
}
@@ -4687,14 +5116,16 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
unsigned NestedLoopCount = 1;
if (CollapseLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
- NestedLoopCount = Result.getLimitedValue();
+ NestedLoopCount = Result.Val.getInt().getLimitedValue();
}
+ unsigned OrderedLoopCount = 1;
if (OrderedLoopCountExpr) {
// Found 'ordered' clause - calculate collapse number.
- llvm::APSInt Result;
- if (OrderedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) {
+ Expr::EvalResult EVResult;
+ if (OrderedLoopCountExpr->EvaluateAsInt(EVResult, SemaRef.getASTContext())) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if (Result.getLimitedValue() < NestedLoopCount) {
SemaRef.Diag(OrderedLoopCountExpr->getExprLoc(),
diag::err_omp_wrong_ordered_loop_count)
@@ -4703,20 +5134,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
diag::note_collapse_loop_count)
<< CollapseLoopCountExpr->getSourceRange();
}
- NestedLoopCount = Result.getLimitedValue();
+ OrderedLoopCount = Result.getLimitedValue();
}
}
// This is helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
SmallVector<LoopIterationSpace, 4> IterSpaces;
- IterSpaces.resize(NestedLoopCount);
+ IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
- if (checkOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
- NestedLoopCount, CollapseLoopCountExpr,
- OrderedLoopCountExpr, VarsWithImplicitDSA,
- IterSpaces[Cnt], Captures))
+ if (checkOpenMPIterationSpace(
+ DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
+ std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
+ OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
+ Captures))
return 0;
// Move on to the next nested for loop, or to the loop body.
// OpenMP [2.8.1, simd construct, Restrictions]
@@ -4725,6 +5157,27 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// any two loops.
CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
}
+ for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
+ if (checkOpenMPIterationSpace(
+ DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
+ std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
+ OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
+ Captures))
+ return 0;
+ if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
+ // Handle initialization of captured loop iterator variables.
+ auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
+ if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
+ Captures[DRE] = DRE;
+ }
+ }
+ // Move on to the next nested for loop, or to the loop body.
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // All loops associated with the construct must be perfectly nested; that
+ // is, there must be no intervening code nor any OpenMP directive between
+ // any two loops.
+ CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
+ }
Built.clear(/* size */ NestedLoopCount);
@@ -4817,13 +5270,14 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Choose either the 32-bit or 64-bit version.
ExprResult LastIteration = LastIteration64;
- if (LastIteration32.isUsable() &&
- C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
- (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
- fitsInto(
- /*Bits=*/32,
- LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
- LastIteration64.get(), SemaRef)))
+ if (SemaRef.getLangOpts().OpenMPOptimisticCollapse ||
+ (LastIteration32.isUsable() &&
+ C.getTypeSize(LastIteration32.get()->getType()) == 32 &&
+ (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 ||
+ fitsInto(
+ /*Bits=*/32,
+ LastIteration32.get()->getType()->hasSignedIntegerRepresentation(),
+ LastIteration64.get(), SemaRef))))
LastIteration = LastIteration32;
QualType VType = LastIteration.get()->getType();
QualType RealVType = VType;
@@ -4913,7 +5367,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
LastIteration.get(), UB.get());
EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(),
CondOp.get());
- EUB = SemaRef.ActOnFinishFullExpr(EUB.get());
+ EUB = SemaRef.ActOnFinishFullExpr(EUB.get(), /*DiscardedValue*/ false);
// If we have a combined directive that combines 'distribute', 'for' or
// 'simd' we need to be able to access the bounds of the schedule of the
@@ -4942,7 +5396,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
LastIteration.get(), CombUB.get());
CombEUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, CombUB.get(),
CombCondOp.get());
- CombEUB = SemaRef.ActOnFinishFullExpr(CombEUB.get());
+ CombEUB =
+ SemaRef.ActOnFinishFullExpr(CombEUB.get(), /*DiscardedValue*/ false);
const CapturedDecl *CD = cast<CapturedStmt>(AStmt)->getCapturedDecl();
// We expect to have at least 2 more parameters than the 'parallel'
@@ -4976,7 +5431,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
? LB.get()
: SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
- Init = SemaRef.ActOnFinishFullExpr(Init.get());
+ Init = SemaRef.ActOnFinishFullExpr(Init.get(), /*DiscardedValue*/ false);
if (isOpenMPLoopBoundSharingDirective(DKind)) {
Expr *CombRHS =
@@ -4987,32 +5442,40 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
: SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
CombInit =
SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), CombRHS);
- CombInit = SemaRef.ActOnFinishFullExpr(CombInit.get());
+ CombInit =
+ SemaRef.ActOnFinishFullExpr(CombInit.get(), /*DiscardedValue*/ false);
}
}
// Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops.
- SourceLocation CondLoc = AStmt->getLocStart();
+ SourceLocation CondLoc = AStmt->getBeginLoc();
ExprResult Cond =
(isOpenMPWorksharingDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get())
: SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(),
NumIterations.get());
+ ExprResult CombDistCond;
+ if (isOpenMPLoopBoundSharingDirective(DKind)) {
+ CombDistCond =
+ SemaRef.BuildBinOp(
+ CurScope, CondLoc, BO_LT, IV.get(), NumIterations.get());
+ }
+
ExprResult CombCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
CombCond =
SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), CombUB.get());
}
// Loop increment (IV = IV + 1)
- SourceLocation IncLoc = AStmt->getLocStart();
+ SourceLocation IncLoc = AStmt->getBeginLoc();
ExprResult Inc =
SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(),
SemaRef.ActOnIntegerConstant(IncLoc, 1).get());
if (!Inc.isUsable())
return 0;
Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get());
- Inc = SemaRef.ActOnFinishFullExpr(Inc.get());
+ Inc = SemaRef.ActOnFinishFullExpr(Inc.get(), /*DiscardedValue*/ false);
if (!Inc.isUsable())
return 0;
@@ -5030,7 +5493,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// LB = LB + ST
NextLB =
SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get());
- NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get());
+ NextLB =
+ SemaRef.ActOnFinishFullExpr(NextLB.get(), /*DiscardedValue*/ false);
if (!NextLB.isUsable())
return 0;
// UB + ST
@@ -5040,7 +5504,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// UB = UB + ST
NextUB =
SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get());
- NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get());
+ NextUB =
+ SemaRef.ActOnFinishFullExpr(NextUB.get(), /*DiscardedValue*/ false);
if (!NextUB.isUsable())
return 0;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
@@ -5051,7 +5516,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// LB = LB + ST
CombNextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombLB.get(),
CombNextLB.get());
- CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get());
+ CombNextLB = SemaRef.ActOnFinishFullExpr(CombNextLB.get(),
+ /*DiscardedValue*/ false);
if (!CombNextLB.isUsable())
return 0;
// UB + ST
@@ -5062,7 +5528,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// UB = UB + ST
CombNextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, CombUB.get(),
CombNextUB.get());
- CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get());
+ CombNextUB = SemaRef.ActOnFinishFullExpr(CombNextUB.get(),
+ /*DiscardedValue*/ false);
if (!CombNextUB.isUsable())
return 0;
}
@@ -5072,8 +5539,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// directive with for as IV = IV + ST; ensure upper bound expression based
// on PrevUB instead of NumIterations - used to implement 'for' when found
// in combination with 'distribute', like in 'distribute parallel for'
- SourceLocation DistIncLoc = AStmt->getLocStart();
- ExprResult DistCond, DistInc, PrevEUB;
+ SourceLocation DistIncLoc = AStmt->getBeginLoc();
+ ExprResult DistCond, DistInc, PrevEUB, ParForInDistCond;
if (isOpenMPLoopBoundSharingDirective(DKind)) {
DistCond = SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get());
assert(DistCond.isUsable() && "distribute cond expr was not built");
@@ -5083,19 +5550,26 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
assert(DistInc.isUsable() && "distribute inc expr was not built");
DistInc = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, IV.get(),
DistInc.get());
- DistInc = SemaRef.ActOnFinishFullExpr(DistInc.get());
+ DistInc =
+ SemaRef.ActOnFinishFullExpr(DistInc.get(), /*DiscardedValue*/ false);
assert(DistInc.isUsable() && "distribute inc expr was not built");
// Build expression: UB = min(UB, prevUB) for #for in composite or combined
// construct
- SourceLocation DistEUBLoc = AStmt->getLocStart();
+ SourceLocation DistEUBLoc = AStmt->getBeginLoc();
ExprResult IsUBGreater =
SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
CondOp.get());
- PrevEUB = SemaRef.ActOnFinishFullExpr(PrevEUB.get());
+ PrevEUB =
+ SemaRef.ActOnFinishFullExpr(PrevEUB.get(), /*DiscardedValue*/ false);
+
+ // Build IV <= PrevUB, to be used when a 'parallel for' is found in
+ // combination with a distribute directive with schedule(static, 1)
+ ParForInDistCond =
+ SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), PrevUB.get());
}
// Build updates and final values of the loop counters.
@@ -5104,33 +5578,60 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.Inits.resize(NestedLoopCount);
Built.Updates.resize(NestedLoopCount);
Built.Finals.resize(NestedLoopCount);
- SmallVector<Expr *, 4> LoopMultipliers;
{
- ExprResult Div;
- // Go from inner nested loop to outer.
- for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
+ // We implement the following algorithm for obtaining the
+ // original loop iteration variable values based on the
+ // value of the collapsed loop iteration variable IV.
+ //
+ // Let n+1 be the number of collapsed loops in the nest.
+ // Iteration variables (I0, I1, .... In)
+ // Iteration counts (N0, N1, ... Nn)
+ //
+ // Acc = IV;
+ //
+ // To compute Ik for loop k, 0 <= k <= n, generate:
+ // Prod = N(k+1) * N(k+2) * ... * Nn;
+ // Ik = Acc / Prod;
+ // Acc -= Ik * Prod;
+ //
+ ExprResult Acc = IV;
+ for (unsigned int Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
LoopIterationSpace &IS = IterSpaces[Cnt];
SourceLocation UpdLoc = IS.IncSrcRange.getBegin();
- // Build: Iter = (IV / Div) % IS.NumIters
- // where Div is product of previous iterations' IS.NumIters.
ExprResult Iter;
- if (Div.isUsable()) {
- Iter =
- SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get());
- } else {
- Iter = IV;
- assert((Cnt == (int)NestedLoopCount - 1) &&
- "unusable div expected on first iteration only");
- }
- if (Cnt != 0 && Iter.isUsable())
- Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(),
- IS.NumIterations);
+ // Compute Prod = N(Cnt+1) * N(Cnt+2) * ... * Nn
+ ExprResult Prod =
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
+ for (unsigned int K = Cnt + 1; K < NestedLoopCount; ++K)
+ Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Prod.get(),
+ IterSpaces[K].NumIterations);
+
+ // Iter = Acc / Prod
+ // Compute the division only if there is at least one more inner loop;
+ // otherwise Prod is 1 and the division is redundant.
+ if (Cnt + 1 < NestedLoopCount)
+ Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div,
+ Acc.get(), Prod.get());
+ else
+ Iter = Acc;
if (!Iter.isUsable()) {
HasErrors = true;
break;
}
+ // Update Acc: Acc -= Iter * Prod
+ // Multiply only if there is at least one more inner loop;
+ // otherwise Prod is simply Iter.
+ if (Cnt + 1 < NestedLoopCount)
+ Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul,
+ Iter.get(), Prod.get());
+ else
+ Prod = Iter;
+ Acc = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Sub,
+ Acc.get(), Prod.get());
+
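
As a minimal standalone sketch of the de-linearization performed above (the helper name and container choice are illustrative, not part of the patch):

// Recovers the per-loop indices I0..In from the collapsed iteration number IV,
// given the trip counts N0..Nn, following the Prod/Acc steps above.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint64_t> delinearize(uint64_t IV, const std::vector<uint64_t> &N) {
  std::vector<uint64_t> I(N.size());
  uint64_t Acc = IV;
  for (std::size_t K = 0; K < N.size(); ++K) {
    uint64_t Prod = 1;
    for (std::size_t J = K + 1; J < N.size(); ++J)
      Prod *= N[J];        // product of the trip counts of the inner loops
    I[K] = Acc / Prod;     // Ik = Acc / Prod
    Acc -= I[K] * Prod;    // Acc -= Ik * Prod
  }
  return I;                // e.g. IV = 17, N = {2, 3, 4} gives I = {1, 1, 1}
}
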
// Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
DeclRefExpr *CounterVar = buildDeclRefExpr(
@@ -5159,23 +5660,6 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
break;
}
- // Build Div for the next iteration: Div <- Div * IS.NumIters
- if (Cnt != 0) {
- if (Div.isUnset())
- Div = IS.NumIterations;
- else
- Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(),
- IS.NumIterations);
-
- // Add parentheses (for debugging purposes only).
- if (Div.isUsable())
- Div = tryBuildCapture(SemaRef, Div.get(), Captures);
- if (!Div.isUsable()) {
- HasErrors = true;
- break;
- }
- LoopMultipliers.push_back(Div.get());
- }
if (!Update.isUsable() || !Final.isUsable()) {
HasErrors = true;
break;
@@ -5196,8 +5680,10 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.IterationVarRef = IV.get();
Built.LastIteration = LastIteration.get();
Built.NumIterations = NumIterations.get();
- Built.CalcLastIteration =
- SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get();
+ Built.CalcLastIteration = SemaRef
+ .ActOnFinishFullExpr(CalcLastIteration.get(),
+ /*DiscardedValue*/ false)
+ .get();
Built.PreCond = PreCond.get();
Built.PreInits = buildPreInits(C, Captures);
Built.Cond = Cond.get();
@@ -5221,55 +5707,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.DistCombinedFields.Cond = CombCond.get();
Built.DistCombinedFields.NLB = CombNextLB.get();
Built.DistCombinedFields.NUB = CombNextUB.get();
-
- Expr *CounterVal = SemaRef.DefaultLvalueConversion(IV.get()).get();
- // Fill data for doacross depend clauses.
- for (const auto &Pair : DSA.getDoacrossDependClauses()) {
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_source) {
- Pair.first->setCounterValue(CounterVal);
- } else {
- if (NestedLoopCount != Pair.second.size() ||
- NestedLoopCount != LoopMultipliers.size() + 1) {
- // Erroneous case - clause has some problems.
- Pair.first->setCounterValue(CounterVal);
- continue;
- }
- assert(Pair.first->getDependencyKind() == OMPC_DEPEND_sink);
- auto I = Pair.second.rbegin();
- auto IS = IterSpaces.rbegin();
- auto ILM = LoopMultipliers.rbegin();
- Expr *UpCounterVal = CounterVal;
- Expr *Multiplier = nullptr;
- for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
- if (I->first) {
- assert(IS->CounterStep);
- Expr *NormalizedOffset =
- SemaRef
- .BuildBinOp(CurScope, I->first->getExprLoc(), BO_Div,
- I->first, IS->CounterStep)
- .get();
- if (Multiplier) {
- NormalizedOffset =
- SemaRef
- .BuildBinOp(CurScope, I->first->getExprLoc(), BO_Mul,
- NormalizedOffset, Multiplier)
- .get();
- }
- assert(I->second == OO_Plus || I->second == OO_Minus);
- BinaryOperatorKind BOK = (I->second == OO_Plus) ? BO_Add : BO_Sub;
- UpCounterVal = SemaRef
- .BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
- UpCounterVal, NormalizedOffset)
- .get();
- }
- Multiplier = *ILM;
- ++I;
- ++IS;
- ++ILM;
- }
- Pair.first->setCounterValue(UpCounterVal);
- }
- }
+ Built.DistCombinedFields.DistCond = CombDistCond.get();
+ Built.DistCombinedFields.ParForInDistCond = ParForInDistCond.get();
return NestedLoopCount;
}
@@ -5305,7 +5744,6 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
}
if (Simdlen && Safelen) {
- llvm::APSInt SimdlenRes, SafelenRes;
const Expr *SimdlenLength = Simdlen->getSimdlen();
const Expr *SafelenLength = Safelen->getSafelen();
if (SimdlenLength->isValueDependent() || SimdlenLength->isTypeDependent() ||
@@ -5316,8 +5754,11 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
SafelenLength->isInstantiationDependent() ||
SafelenLength->containsUnexpandedParameterPack())
return false;
- SimdlenLength->EvaluateAsInt(SimdlenRes, S.Context);
- SafelenLength->EvaluateAsInt(SafelenRes, S.Context);
+ Expr::EvalResult SimdlenResult, SafelenResult;
+ SimdlenLength->EvaluateAsInt(SimdlenResult, S.Context);
+ SafelenLength->EvaluateAsInt(SafelenResult, S.Context);
+ llvm::APSInt SimdlenRes = SimdlenResult.Val.getInt();
+ llvm::APSInt SafelenRes = SafelenResult.Val.getInt();
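
For context, this hunk switches to the newer Expr::EvaluateAsInt interface, which reports through an Expr::EvalResult rather than filling an llvm::APSInt directly. A minimal sketch of the pattern, with an illustrative helper name:

#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Optional.h"

// Returns the folded integer value of E, if E is an integer constant expression.
static llvm::Optional<llvm::APSInt> tryEvaluateInt(const clang::Expr *E,
                                                   const clang::ASTContext &Ctx) {
  clang::Expr::EvalResult Result;
  if (!E->EvaluateAsInt(Result, Ctx))
    return llvm::None;          // not a constant
  return Result.Val.getInt();   // the APSInt now lives inside the APValue
}
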
// OpenMP 4.5 [2.8.1, simd Construct, Restrictions]
// If both simdlen and safelen clauses are specified, the value of the
// simdlen parameter must be less than or equal to the value of the safelen
@@ -5466,7 +5907,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
- Diag(SectionStmt->getLocStart(),
+ Diag(SectionStmt->getBeginLoc(),
diag::err_omp_sections_substmt_not_section);
return StmtError();
}
@@ -5474,7 +5915,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
->setHasCancel(DSAStack->isCancelRegion());
}
} else {
- Diag(AStmt->getLocStart(), diag::err_omp_sections_not_compound_stmt);
+ Diag(AStmt->getBeginLoc(), diag::err_omp_sections_not_compound_stmt);
return StmtError();
}
@@ -5520,9 +5961,9 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
else if (Clause->getClauseKind() == OMPC_copyprivate)
Copyprivate = Clause;
if (Copyprivate && Nowait) {
- Diag(Copyprivate->getLocStart(),
+ Diag(Copyprivate->getBeginLoc(),
diag::err_omp_single_copyprivate_with_nowait);
- Diag(Nowait->getLocStart(), diag::note_omp_nowait_clause_here);
+ Diag(Nowait->getBeginLoc(), diag::note_omp_nowait_clause_here);
return StmtError();
}
}
@@ -5558,7 +5999,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
for (const OMPClause *C : Clauses) {
if (C->getClauseKind() == OMPC_hint) {
if (!DirName.getName()) {
- Diag(C->getLocStart(), diag::err_omp_hint_clause_no_name);
+ Diag(C->getBeginLoc(), diag::err_omp_hint_clause_no_name);
ErrorFound = true;
}
Expr *E = cast<OMPHintClause>(C)->getHint();
@@ -5567,7 +6008,7 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
DependentHint = true;
} else {
Hint = E->EvaluateKnownConstInt(Context);
- HintLoc = C->getLocStart();
+ HintLoc = C->getBeginLoc();
}
}
}
@@ -5583,12 +6024,12 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
else
Diag(StartLoc, diag::note_omp_critical_no_hint) << 0;
if (const auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
- Diag(C->getLocStart(), diag::note_omp_critical_hint_here)
+ Diag(C->getBeginLoc(), diag::note_omp_critical_hint_here)
<< 1
<< C->getHint()->EvaluateKnownConstInt(Context).toString(
/*Radix=*/10, /*Signed=*/false);
} else {
- Diag(Pair.first->getLocStart(), diag::note_omp_critical_no_hint) << 1;
+ Diag(Pair.first->getBeginLoc(), diag::note_omp_critical_no_hint) << 1;
}
}
}
@@ -5709,7 +6150,7 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
- Diag(SectionStmt->getLocStart(),
+ Diag(SectionStmt->getBeginLoc(),
diag::err_omp_parallel_sections_substmt_not_section);
return StmtError();
}
@@ -5717,7 +6158,7 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
->setHasCancel(DSAStack->isCancelRegion());
}
} else {
- Diag(AStmt->getLocStart(),
+ Diag(AStmt->getBeginLoc(),
diag::err_omp_parallel_sections_not_compound_stmt);
return StmtError();
}
@@ -5801,7 +6242,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
DependFound = C;
if (DC->getDependencyKind() == OMPC_DEPEND_source) {
if (DependSourceClause) {
- Diag(C->getLocStart(), diag::err_omp_more_one_clause)
+ Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(OMPD_ordered)
<< getOpenMPClauseName(OMPC_depend) << 2;
ErrorFound = true;
@@ -5809,13 +6250,13 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
DependSourceClause = C;
}
if (DependSinkClause) {
- Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
<< 0;
ErrorFound = true;
}
} else if (DC->getDependencyKind() == OMPC_DEPEND_sink) {
if (DependSourceClause) {
- Diag(C->getLocStart(), diag::err_omp_depend_sink_source_not_allowed)
+ Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
<< 1;
ErrorFound = true;
}
@@ -5835,19 +6276,19 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_prohibited_region_simd);
ErrorFound = true;
} else if (DependFound && (TC || SC)) {
- Diag(DependFound->getLocStart(), diag::err_omp_depend_clause_thread_simd)
+ Diag(DependFound->getBeginLoc(), diag::err_omp_depend_clause_thread_simd)
<< getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
ErrorFound = true;
- } else if (DependFound && !DSAStack->getParentOrderedRegionParam()) {
- Diag(DependFound->getLocStart(),
+ } else if (DependFound && !DSAStack->getParentOrderedRegionParam().first) {
+ Diag(DependFound->getBeginLoc(),
diag::err_omp_ordered_directive_without_param);
ErrorFound = true;
} else if (TC || Clauses.empty()) {
- if (const Expr *Param = DSAStack->getParentOrderedRegionParam()) {
- SourceLocation ErrLoc = TC ? TC->getLocStart() : StartLoc;
+ if (const Expr *Param = DSAStack->getParentOrderedRegionParam().first) {
+ SourceLocation ErrLoc = TC ? TC->getBeginLoc() : StartLoc;
Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
<< (TC != nullptr);
- Diag(Param->getLocStart(), diag::note_omp_ordered_param);
+ Diag(Param->getBeginLoc(), diag::note_omp_ordered_param);
ErrorFound = true;
}
}
@@ -6068,12 +6509,12 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
}
} else {
ErrorFound = NotAScalarType;
- NoteLoc = ErrorLoc = AtomicBody->getLocStart();
+ NoteLoc = ErrorLoc = AtomicBody->getBeginLoc();
NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
}
} else {
ErrorFound = NotAnExpression;
- NoteLoc = ErrorLoc = S->getLocStart();
+ NoteLoc = ErrorLoc = S->getBeginLoc();
NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
}
if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) {
@@ -6125,13 +6566,13 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
C->getClauseKind() == OMPC_update ||
C->getClauseKind() == OMPC_capture) {
if (AtomicKind != OMPC_unknown) {
- Diag(C->getLocStart(), diag::err_omp_atomic_several_clauses)
- << SourceRange(C->getLocStart(), C->getLocEnd());
+ Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
<< getOpenMPClauseName(AtomicKind);
} else {
AtomicKind = C->getClauseKind();
- AtomicKindLoc = C->getLocStart();
+ AtomicKindLoc = C->getBeginLoc();
}
}
}
@@ -6219,7 +6660,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
} else {
ErrorFound = NotAnExpression;
- NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteLoc = ErrorLoc = Body->getBeginLoc();
NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
}
if (ErrorFound != NoError) {
@@ -6281,7 +6722,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
} else {
ErrorFound = NotAnExpression;
- NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteLoc = ErrorLoc = Body->getBeginLoc();
NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
}
if (ErrorFound != NoError) {
@@ -6465,7 +6906,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) {
ErrorFound = NotAnAssignmentOp;
NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc()
- : First->getLocStart();
+ : First->getBeginLoc();
NoteRange = ErrorRange = FirstBinOp
? FirstBinOp->getSourceRange()
: SourceRange(ErrorLoc, ErrorLoc);
@@ -6475,7 +6916,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
ErrorFound = NotAnAssignmentOp;
NoteLoc = ErrorLoc = SecondBinOp
? SecondBinOp->getOperatorLoc()
- : Second->getLocStart();
+ : Second->getBeginLoc();
NoteRange = ErrorRange =
SecondBinOp ? SecondBinOp->getSourceRange()
: SourceRange(ErrorLoc, ErrorLoc);
@@ -6509,15 +6950,15 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
}
}
} else {
- NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteLoc = ErrorLoc = Body->getBeginLoc();
NoteRange = ErrorRange =
- SourceRange(Body->getLocStart(), Body->getLocStart());
+ SourceRange(Body->getBeginLoc(), Body->getBeginLoc());
ErrorFound = NotTwoSubstatements;
}
} else {
- NoteLoc = ErrorLoc = Body->getLocStart();
+ NoteLoc = ErrorLoc = Body->getBeginLoc();
NoteRange = ErrorRange =
- SourceRange(Body->getLocStart(), Body->getLocStart());
+ SourceRange(Body->getBeginLoc(), Body->getBeginLoc());
ErrorFound = NotACompoundStatement;
}
if (ErrorFound != NoError) {
@@ -6590,7 +7031,7 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_target_contains_not_only_teams);
Diag(DSAStack->getInnerTeamsRegionLoc(),
diag::note_omp_nested_teams_construct_here);
- Diag(S->getLocStart(), diag::note_omp_nested_statement_here)
+ Diag(S->getBeginLoc(), diag::note_omp_nested_statement_here)
<< isa<OMPExecutableDirective>(S);
return StmtError();
}
@@ -6894,11 +7335,11 @@ static bool checkGrainsizeNumTasksClauses(Sema &S,
if (!PrevClause)
PrevClause = C;
else if (PrevClause->getClauseKind() != C->getClauseKind()) {
- S.Diag(C->getLocStart(),
+ S.Diag(C->getBeginLoc(),
diag::err_omp_grainsize_num_tasks_mutually_exclusive)
<< getOpenMPClauseName(C->getClauseKind())
<< getOpenMPClauseName(PrevClause->getClauseKind());
- S.Diag(PrevClause->getLocStart(),
+ S.Diag(PrevClause->getBeginLoc(),
diag::note_omp_previous_grainsize_num_tasks)
<< getOpenMPClauseName(PrevClause->getClauseKind());
ErrorFound = true;
@@ -6927,9 +7368,9 @@ static bool checkReductionClauseWithNogroup(Sema &S,
}
}
if (ReductionClause && NogroupClause) {
- S.Diag(ReductionClause->getLocStart(), diag::err_omp_reduction_with_nogroup)
- << SourceRange(NogroupClause->getLocStart(),
- NogroupClause->getLocEnd());
+ S.Diag(ReductionClause->getBeginLoc(), diag::err_omp_reduction_with_nogroup)
+ << SourceRange(NogroupClause->getBeginLoc(),
+ NogroupClause->getEndLoc());
return true;
}
return false;
@@ -7859,6 +8300,11 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -7944,6 +8390,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_distribute_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with if-clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8009,6 +8456,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_distribute_simd:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_threads-clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8072,6 +8520,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_ordered:
case OMPD_atomic:
case OMPD_distribute_simd:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8135,6 +8584,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_ordered:
case OMPD_atomic:
case OMPD_distribute_simd:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with thread_limit-clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8198,6 +8648,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_target_teams:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8261,6 +8712,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_ordered:
case OMPD_atomic:
case OMPD_target_teams:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8324,6 +8776,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_ordered:
case OMPD_atomic:
case OMPD_distribute_simd:
+ case OMPD_requires:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
llvm_unreachable("Unknown OpenMP directive");
@@ -8373,6 +8826,11 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Unexpected OpenMP clause.");
}
return CaptureRegion;
@@ -8619,9 +9077,11 @@ OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
} else {
NumForLoops = nullptr;
}
- DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops);
- return new (Context)
- OMPOrderedClause(NumForLoops, StartLoc, LParenLoc, EndLoc);
+ auto *Clause = OMPOrderedClause::Create(
+ Context, NumForLoops, NumForLoops ? DSAStack->getAssociatedLoops() : 0,
+ StartLoc, LParenLoc, EndLoc);
+ DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops, Clause);
+ return Clause;
}
OMPClause *Sema::ActOnOpenMPSimpleClause(
@@ -8639,6 +9099,11 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static_cast<OpenMPProcBindClauseKind>(Argument), ArgumentLoc, StartLoc,
LParenLoc, EndLoc);
break;
+ case OMPC_atomic_default_mem_order:
+ Res = ActOnOpenMPAtomicDefaultMemOrderClause(
+ static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -8688,6 +9153,10 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -8760,6 +9229,21 @@ OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
+ OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindKwLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
+ if (Kind == OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown) {
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(
+ OMPC_atomic_default_mem_order, /*First=*/0,
+ /*Last=*/OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown)
+ << getOpenMPClauseName(OMPC_atomic_default_mem_order);
+ return nullptr;
+ }
+ return new (Context) OMPAtomicDefaultMemOrderClause(Kind, KindKwLoc, StartLoc,
+ LParenLoc, EndLoc);
+}
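
At the source level this clause appears on the OpenMP 5.0 'requires' directive; a minimal usage sketch (seq_cst is just one of the allowed arguments):

#pragma omp requires atomic_default_mem_order(seq_cst)

void bump(int *counter) {
  // With the requirement above, an atomic construct without an explicit
  // memory-order clause behaves as seq_cst instead of relaxed.
  #pragma omp atomic update
  (*counter)++;
}
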
+
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -8844,6 +9328,11 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -8923,7 +9412,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
if (!ChunkSize->isValueDependent() && !ChunkSize->isTypeDependent() &&
!ChunkSize->isInstantiationDependent() &&
!ChunkSize->containsUnexpandedParameterPack()) {
- SourceLocation ChunkSizeLoc = ChunkSize->getLocStart();
+ SourceLocation ChunkSizeLoc = ChunkSize->getBeginLoc();
ExprResult Val =
PerformOpenMPImplicitIntegerConversion(ChunkSizeLoc, ChunkSize);
if (Val.isInvalid())
@@ -8999,6 +9488,18 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_nogroup:
Res = ActOnOpenMPNogroupClause(StartLoc, EndLoc);
break;
+ case OMPC_unified_address:
+ Res = ActOnOpenMPUnifiedAddressClause(StartLoc, EndLoc);
+ break;
+ case OMPC_unified_shared_memory:
+ Res = ActOnOpenMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+ break;
+ case OMPC_reverse_offload:
+ Res = ActOnOpenMPReverseOffloadClause(StartLoc, EndLoc);
+ break;
+ case OMPC_dynamic_allocators:
+ Res = ActOnOpenMPDynamicAllocatorsClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -9038,6 +9539,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_from:
case OMPC_use_device_ptr:
case OMPC_is_device_ptr:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -9099,12 +9601,34 @@ OMPClause *Sema::ActOnOpenMPNogroupClause(SourceLocation StartLoc,
return new (Context) OMPNogroupClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUnifiedAddressClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPUnifiedSharedMemoryClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPReverseOffloadClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
+}
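
The remaining requires clauses are handled the same way; a minimal sketch of the surface syntax (illustrative, assuming a device that actually provides unified memory):

// States that all devices share memory with the host, so the pointer can be
// dereferenced inside the target region without an explicit map clause.
#pragma omp requires unified_shared_memory

void scale(double *a, int n, double f) {
  #pragma omp target teams distribute parallel for
  for (int i = 0; i < n; ++i)
    a[i] *= f;
}
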
+
OMPClause *Sema::ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
- OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
+ OpenMPLinearClauseKind LinKind,
+ ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc) {
OMPClause *Res = nullptr;
@@ -9157,9 +9681,9 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
- Res = ActOnOpenMPMapClause(MapTypeModifier, MapType, IsMapTypeImplicit,
- DepLinMapLoc, ColonLoc, VarList, StartLoc,
- LParenLoc, EndLoc);
+ Res = ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc, MapType,
+ IsMapTypeImplicit, DepLinMapLoc, ColonLoc,
+ VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_to:
Res = ActOnOpenMPToClause(VarList, StartLoc, LParenLoc, EndLoc);
@@ -9206,6 +9730,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_defaultmap:
case OMPC_unknown:
case OMPC_uniform:
+ case OMPC_unified_address:
+ case OMPC_unified_shared_memory:
+ case OMPC_reverse_offload:
+ case OMPC_dynamic_allocators:
+ case OMPC_atomic_default_mem_order:
llvm_unreachable("Clause is not allowed.");
}
return Res;
@@ -9321,6 +9850,17 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
continue;
Type = Type.getNonReferenceType();
+ // OpenMP 5.0 [2.19.3, List Item Privatization, Restrictions]
+ // A variable that is privatized must not have a const-qualified type
+ // unless it is of class type with a mutable member. This restriction does
+ // not apply to the firstprivate clause.
+ //
+ // OpenMP 3.1 [2.9.3.3, private clause, Restrictions]
+ // A variable that appears in a private clause must not have a
+ // const-qualified type unless it is of class type with a mutable member.
+ if (rejectConstNotMutableType(*this, D, Type, OMPC_private, ELoc))
+ continue;
+
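
A minimal sketch of what this check accepts and rejects (type and variable names are illustrative):

struct Tally {
  mutable int hits = 0; // the mutable member is what makes a const Tally privatizable
  int limit = 100;
};

void walk(int n) {
  const Tally t{};
  #pragma omp parallel for private(t) // accepted: const class type with a mutable member
  for (int i = 0; i < n; ++i)
    t.hits++;

  // const int k = 0;
  // #pragma omp parallel for private(k) // rejected: const-qualified, no mutable member
  // for (int i = 0; i < n; ++i) { }
}
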
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct]
// Variables with the predetermined data-sharing attributes may not be
@@ -9742,6 +10282,17 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
continue;
Type = Type.getNonReferenceType();
+ // OpenMP 5.0 [2.19.3, List Item Privatization, Restrictions]
+ // A variable that is privatized must not have a const-qualified type
+ // unless it is of class type with a mutable member. This restriction does
+ // not apply to the firstprivate clause.
+ //
+ // OpenMP 3.1 [2.9.3.5, lastprivate clause, Restrictions]
+ // A variable that appears in a lastprivate clause must not have a
+ // const-qualified type unless it is of class type with a mutable member.
+ if (rejectConstNotMutableType(*this, D, Type, OMPC_lastprivate, ELoc))
+ continue;
+
OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
// OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct]
@@ -9807,8 +10358,8 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
- AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
- /*DiscardedValue=*/true);
+ AssignmentOp =
+ ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
@@ -9971,6 +10522,79 @@ static T filterLookupForUDR(SmallVectorImpl<U> &Lookups,
return T();
}
+static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
+ assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
+
+ for (auto *RD : D->redecls()) {
+ // Don't bother with extra checks if we already know this one isn't visible.
+ if (RD == D)
+ continue;
+
+ auto *ND = cast<NamedDecl>(RD);
+ if (LookupResult::isVisible(SemaRef, ND))
+ return ND;
+ }
+
+ return nullptr;
+}
+
+static void
+argumentDependentLookup(Sema &SemaRef, const DeclarationNameInfo &ReductionId,
+ SourceLocation Loc, QualType Ty,
+ SmallVectorImpl<UnresolvedSet<8>> &Lookups) {
+ // Find all of the associated namespaces and classes based on the
+ // arguments we have.
+ Sema::AssociatedNamespaceSet AssociatedNamespaces;
+ Sema::AssociatedClassSet AssociatedClasses;
+ OpaqueValueExpr OVE(Loc, Ty, VK_LValue);
+ SemaRef.FindAssociatedClassesAndNamespaces(Loc, &OVE, AssociatedNamespaces,
+ AssociatedClasses);
+
+ // C++ [basic.lookup.argdep]p3:
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains [...] then Y is
+ // empty. Otherwise Y is the set of declarations found in the
+ // namespaces associated with the argument types as described
+ // below. The set of declarations found by the lookup of the name
+ // is the union of X and Y.
+ //
+ // Here, we compute Y and add its members to the overloaded
+ // candidate set.
+ for (auto *NS : AssociatedNamespaces) {
+ // When considering an associated namespace, the lookup is the
+ // same as the lookup performed when the associated namespace is
+ // used as a qualifier (3.4.3.2) except that:
+ //
+ // -- Any using-directives in the associated namespace are
+ // ignored.
+ //
+ // -- Any namespace-scope friend functions declared in
+ // associated classes are visible within their respective
+ // namespaces even if they are not visible during an ordinary
+ // lookup (11.4).
+ DeclContext::lookup_result R = NS->lookup(ReductionId.getName());
+ for (auto *D : R) {
+ auto *Underlying = D;
+ if (auto *USD = dyn_cast<UsingShadowDecl>(D))
+ Underlying = USD->getTargetDecl();
+
+ if (!isa<OMPDeclareReductionDecl>(Underlying))
+ continue;
+
+ if (!SemaRef.isVisible(D)) {
+ D = findAcceptableDecl(SemaRef, D);
+ if (!D)
+ continue;
+ if (auto *USD = dyn_cast<UsingShadowDecl>(D))
+ Underlying = USD->getTargetDecl();
+ }
+ Lookups.emplace_back();
+ Lookups.back().addDecl(Underlying);
+ }
+ }
+}
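
The practical effect is that a 'declare reduction' living in the namespace of its operand type can now be found through argument-dependent lookup from outside that namespace; a minimal sketch (names are illustrative):

namespace geom {
struct Point {
  int x = 0;
  int y = 0;
};
// Combiner for the '+' reduction identifier over geom::Point.
#pragma omp declare reduction(+ : Point : omp_out.x += omp_in.x, omp_out.y += omp_in.y)
} // namespace geom

int sumX(const geom::Point *v, int n) {
  geom::Point s;
  // The reduction for geom::Point is found via ADL in namespace geom.
  #pragma omp parallel for reduction(+ : s)
  for (int i = 0; i < n; ++i) {
    s.x += v[i].x;
    s.y += v[i].y;
  }
  return s.x;
}
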
+
static ExprResult
buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
Scope *S, CXXScopeSpec &ReductionIdScopeSpec,
@@ -9989,7 +10613,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
} while (S && !S->isDeclScope(D));
if (S)
S = S->getParent();
- Lookups.push_back(UnresolvedSet<8>());
+ Lookups.emplace_back();
Lookups.back().append(Lookup.begin(), Lookup.end());
Lookup.clear();
}
@@ -10016,6 +10640,8 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
})) {
UnresolvedSet<8> ResSet;
for (const UnresolvedSet<8> &Set : Lookups) {
+ if (Set.empty())
+ continue;
ResSet.append(Set.begin(), Set.end());
// The last item marks the end of all declarations at the specified scope.
ResSet.addDecl(Set[Set.size() - 1]);
@@ -10025,6 +10651,36 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
ReductionIdScopeSpec.getWithLocInContext(SemaRef.Context), ReductionId,
/*ADL=*/true, /*Overloaded=*/true, ResSet.begin(), ResSet.end());
}
+ // Lookup inside the classes.
+ // C++ [over.match.oper]p3:
+ // For a unary operator @ with an operand of a type whose
+ // cv-unqualified version is T1, and for a binary operator @ with
+ // a left operand of a type whose cv-unqualified version is T1 and
+ // a right operand of a type whose cv-unqualified version is T2,
+ // three sets of candidate functions, designated member
+ // candidates, non-member candidates and built-in candidates, are
+ // constructed as follows:
+ // -- If T1 is a complete class type or a class currently being
+ // defined, the set of member candidates is the result of the
+ // qualified lookup of T1::operator@ (13.3.1.1.1); otherwise,
+ // the set of member candidates is empty.
+ LookupResult Lookup(SemaRef, ReductionId, Sema::LookupOMPReductionName);
+ Lookup.suppressDiagnostics();
+ if (const auto *TyRec = Ty->getAs<RecordType>()) {
+ // Complete the type if it can be completed.
+ // If the type is neither complete nor being defined, bail out now.
+ if (SemaRef.isCompleteType(Loc, Ty) || TyRec->isBeingDefined() ||
+ TyRec->getDecl()->getDefinition()) {
+ Lookup.clear();
+ SemaRef.LookupQualifiedName(Lookup, TyRec->getDecl());
+ if (Lookup.empty()) {
+ Lookups.emplace_back();
+ Lookups.back().append(Lookup.begin(), Lookup.end());
+ }
+ }
+ }
+ // Perform ADL.
+ argumentDependentLookup(SemaRef, ReductionId, Loc, Ty, Lookups);
if (auto *VD = filterLookupForUDR<ValueDecl *>(
Lookups, [&SemaRef, Ty](ValueDecl *D) -> ValueDecl * {
if (!D->isInvalidDecl() &&
@@ -10131,10 +10787,11 @@ static bool checkOMPArraySectionConstantForReduction(
SingleElement = true;
ArraySizes.push_back(llvm::APSInt::get(1));
} else {
- llvm::APSInt ConstantLengthValue;
- if (!Length->EvaluateAsInt(ConstantLengthValue, Context))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, Context))
return false;
+ llvm::APSInt ConstantLengthValue = Result.Val.getInt();
SingleElement = (ConstantLengthValue.getSExtValue() == 1);
ArraySizes.push_back(ConstantLengthValue);
}
@@ -10155,9 +10812,12 @@ static bool checkOMPArraySectionConstantForReduction(
// This is an array subscript which has implicit length 1!
ArraySizes.push_back(llvm::APSInt::get(1));
} else {
- llvm::APSInt ConstantLengthValue;
- if (!Length->EvaluateAsInt(ConstantLengthValue, Context) ||
- ConstantLengthValue.getSExtValue() != 1)
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, Context))
+ return false;
+
+ llvm::APSInt ConstantLengthValue = Result.Val.getInt();
+ if (ConstantLengthValue.getSExtValue() != 1)
return false;
ArraySizes.push_back(ConstantLengthValue);
@@ -10342,76 +11002,71 @@ static bool actOnOMPReductionKindClause(
// OpenMP [2.14.3.6, reduction clause, Restrictions]
// A list item that appears in a reduction clause must not be
// const-qualified.
- if (Type.getNonReferenceType().isConstant(Context)) {
- S.Diag(ELoc, diag::err_omp_const_reduction_list_item) << ERange;
- if (!ASE && !OASE) {
- bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
- VarDecl::DeclarationOnly;
- S.Diag(D->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << D;
- }
+ if (rejectConstNotMutableType(S, D, Type, ClauseKind, ELoc,
+ /*AcceptIfMutable*/ false, ASE || OASE))
continue;
- }
+
+ OpenMPDirectiveKind CurrDir = Stack->getCurrentDirective();
// OpenMP [2.9.3.6, Restrictions, C/C++, p.4]
// If a list-item is a reference type then it must bind to the same object
// for all threads of the team.
- if (!ASE && !OASE && VD) {
- VarDecl *VDDef = VD->getDefinition();
- if (VD->getType()->isReferenceType() && VDDef && VDDef->hasInit()) {
- DSARefChecker Check(Stack);
- if (Check.Visit(VDDef->getInit())) {
- S.Diag(ELoc, diag::err_omp_reduction_ref_type_arg)
- << getOpenMPClauseName(ClauseKind) << ERange;
- S.Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef;
- continue;
+ if (!ASE && !OASE) {
+ if (VD) {
+ VarDecl *VDDef = VD->getDefinition();
+ if (VD->getType()->isReferenceType() && VDDef && VDDef->hasInit()) {
+ DSARefChecker Check(Stack);
+ if (Check.Visit(VDDef->getInit())) {
+ S.Diag(ELoc, diag::err_omp_reduction_ref_type_arg)
+ << getOpenMPClauseName(ClauseKind) << ERange;
+ S.Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef;
+ continue;
+ }
}
}
- }
-
- // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
- // in a Construct]
- // Variables with the predetermined data-sharing attributes may not be
- // listed in data-sharing attributes clauses, except for the cases
- // listed below. For these exceptions only, listing a predetermined
- // variable in a data-sharing attribute clause is allowed and overrides
- // the variable's predetermined data-sharing attributes.
- // OpenMP [2.14.3.6, Restrictions, p.3]
- // Any number of reduction clauses can be specified on the directive,
- // but a list item can appear only once in the reduction clauses for that
- // directive.
- DSAStackTy::DSAVarData DVar = Stack->getTopDSA(D, /*FromParent=*/false);
- if (DVar.CKind == OMPC_reduction) {
- S.Diag(ELoc, diag::err_omp_once_referenced)
- << getOpenMPClauseName(ClauseKind);
- if (DVar.RefExpr)
- S.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_referenced);
- continue;
- }
- if (DVar.CKind != OMPC_unknown) {
- S.Diag(ELoc, diag::err_omp_wrong_dsa)
- << getOpenMPClauseName(DVar.CKind)
- << getOpenMPClauseName(OMPC_reduction);
- reportOriginalDsa(S, Stack, D, DVar);
- continue;
- }
- // OpenMP [2.14.3.6, Restrictions, p.1]
- // A list item that appears in a reduction clause of a worksharing
- // construct must be shared in the parallel regions to which any of the
- // worksharing regions arising from the worksharing construct bind.
- OpenMPDirectiveKind CurrDir = Stack->getCurrentDirective();
- if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir) &&
- !isOpenMPTeamsDirective(CurrDir)) {
- DVar = Stack->getImplicitDSA(D, true);
- if (DVar.CKind != OMPC_shared) {
- S.Diag(ELoc, diag::err_omp_required_access)
- << getOpenMPClauseName(OMPC_reduction)
- << getOpenMPClauseName(OMPC_shared);
+ // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced
+ // in a Construct]
+ // Variables with the predetermined data-sharing attributes may not be
+ // listed in data-sharing attributes clauses, except for the cases
+ // listed below. For these exceptions only, listing a predetermined
+ // variable in a data-sharing attribute clause is allowed and overrides
+ // the variable's predetermined data-sharing attributes.
+ // OpenMP [2.14.3.6, Restrictions, p.3]
+ // Any number of reduction clauses can be specified on the directive,
+ // but a list item can appear only once in the reduction clauses for that
+ // directive.
+ DSAStackTy::DSAVarData DVar = Stack->getTopDSA(D, /*FromParent=*/false);
+ if (DVar.CKind == OMPC_reduction) {
+ S.Diag(ELoc, diag::err_omp_once_referenced)
+ << getOpenMPClauseName(ClauseKind);
+ if (DVar.RefExpr)
+ S.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_referenced);
+ continue;
+ }
+ if (DVar.CKind != OMPC_unknown) {
+ S.Diag(ELoc, diag::err_omp_wrong_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_reduction);
reportOriginalDsa(S, Stack, D, DVar);
continue;
}
+
+ // OpenMP [2.14.3.6, Restrictions, p.1]
+ // A list item that appears in a reduction clause of a worksharing
+ // construct must be shared in the parallel regions to which any of the
+ // worksharing regions arising from the worksharing construct bind.
+ if (isOpenMPWorksharingDirective(CurrDir) &&
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
+ DVar = Stack->getImplicitDSA(D, true);
+ if (DVar.CKind != OMPC_shared) {
+ S.Diag(ELoc, diag::err_omp_required_access)
+ << getOpenMPClauseName(OMPC_reduction)
+ << getOpenMPClauseName(OMPC_shared);
+ reportOriginalDsa(S, Stack, D, DVar);
+ continue;
+ }
+ }
}
// Try to find 'declare reduction' corresponding construct before using
@@ -10430,7 +11085,7 @@ static bool actOnOMPReductionKindClause(
}
if (BOK == BO_Comma && DeclareReductionRef.isUnset()) {
// Not allowed reduction identifier is found.
- S.Diag(ReductionId.getLocStart(),
+ S.Diag(ReductionId.getBeginLoc(),
diag::err_omp_unknown_reduction_identifier)
<< Type << ReductionIdRange;
continue;
@@ -10683,26 +11338,27 @@ static bool actOnOMPReductionKindClause(
ELoc, Context.getPointerType(FnTy), VK_RValue, OK_Ordinary,
S.DefaultLvalueConversion(DeclareReductionRef.get()).get());
Expr *Args[] = {LHS.get(), RHS.get()};
- ReductionOp = new (Context)
- CallExpr(Context, OVE, Args, Context.VoidTy, VK_RValue, ELoc);
+ ReductionOp =
+ CallExpr::Create(Context, OVE, Args, Context.VoidTy, VK_RValue, ELoc);
} else {
ReductionOp = S.BuildBinOp(
- Stack->getCurScope(), ReductionId.getLocStart(), BOK, LHSDRE, RHSDRE);
+ Stack->getCurScope(), ReductionId.getBeginLoc(), BOK, LHSDRE, RHSDRE);
if (ReductionOp.isUsable()) {
if (BOK != BO_LT && BOK != BO_GT) {
ReductionOp =
- S.BuildBinOp(Stack->getCurScope(), ReductionId.getLocStart(),
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
BO_Assign, LHSDRE, ReductionOp.get());
} else {
auto *ConditionalOp = new (Context)
ConditionalOperator(ReductionOp.get(), ELoc, LHSDRE, ELoc, RHSDRE,
Type, VK_LValue, OK_Ordinary);
ReductionOp =
- S.BuildBinOp(Stack->getCurScope(), ReductionId.getLocStart(),
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
BO_Assign, LHSDRE, ConditionalOp);
}
if (ReductionOp.isUsable())
- ReductionOp = S.ActOnFinishFullExpr(ReductionOp.get());
+ ReductionOp = S.ActOnFinishFullExpr(ReductionOp.get(),
+ /*DiscardedValue*/ false);
}
if (!ReductionOp.isUsable())
continue;
@@ -10744,7 +11400,7 @@ static bool actOnOMPReductionKindClause(
EmitError = RedId != ParentRedId;
}
if (EmitError) {
- S.Diag(ReductionId.getLocStart(),
+ S.Diag(ReductionId.getBeginLoc(),
diag::err_omp_reduction_identifier_mismatch)
<< ReductionIdRange << RefExpr->getSourceRange();
S.Diag(ParentSR.getBegin(),
@@ -10895,20 +11551,12 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
}
Type = Type.getNonReferenceType();
- // A list item must not be const-qualified.
- if (Type.isConstant(Context)) {
- Diag(ELoc, diag::err_omp_const_variable)
- << getOpenMPClauseName(OMPC_linear);
- if (D) {
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
- Diag(D->getLocation(),
- IsDecl ? diag::note_previous_decl : diag::note_defined_here)
- << D;
- }
+ // OpenMP 5.0 [2.19.3, List Item Privatization, Restrictions]
+ // A variable that is privatized must not have a const-qualified type
+ // unless it is of class type with a mutable member. This restriction does
+ // not apply to the firstprivate clause.
+ if (rejectConstNotMutableType(*this, D, Type, OMPC_linear, ELoc))
return true;
- }
// A list item must be of integral or pointer type.
Type = Type.getUnqualifiedType().getCanonicalType();
@@ -10945,8 +11593,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
- /*AllowArraySection=*/false);
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -11028,7 +11675,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
if (Step && !Step->isValueDependent() && !Step->isTypeDependent() &&
!Step->isInstantiationDependent() &&
!Step->containsUnexpandedParameterPack()) {
- SourceLocation StepLoc = Step->getLocStart();
+ SourceLocation StepLoc = Step->getBeginLoc();
ExprResult Val = PerformOpenMPImplicitIntegerConversion(StepLoc, Step);
if (Val.isInvalid())
return nullptr;
@@ -11041,7 +11688,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc);
ExprResult CalcStep =
BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr);
- CalcStep = ActOnFinishFullExpr(CalcStep.get());
+ CalcStep = ActOnFinishFullExpr(CalcStep.get(), /*DiscardedValue*/ false);
// Warn about a zero linear step (it would probably be better specified by
// making the corresponding variables 'const').
@@ -11086,8 +11733,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange,
- /*AllowArraySection=*/false);
+ auto Res = getPrivateItem(SemaRef, SimpleRefExpr, ELoc, ERange);
ValueDecl *D = Res.first;
if (Res.second || !D) {
Updates.push_back(nullptr);
@@ -11129,8 +11775,8 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
InitExpr, IV, Step, /* Subtract */ false);
else
Update = *CurPrivate;
- Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getLocStart(),
- /*DiscardedValue=*/true);
+ Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getBeginLoc(),
+ /*DiscardedValue*/ false);
// Build final: Var = InitExpr + NumIterations * Step
ExprResult Final;
@@ -11140,8 +11786,8 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
InitExpr, NumIterations, Step, /*Subtract=*/false);
else
Final = *CurPrivate;
- Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getLocStart(),
- /*DiscardedValue=*/true);
+ Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getBeginLoc(),
+ /*DiscardedValue*/ false);
if (!Update.isUsable() || !Final.isUsable()) {
Updates.push_back(nullptr);
@@ -11168,8 +11814,7 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
- /*AllowArraySection=*/false);
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -11293,12 +11938,12 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
// operator for the class type.
QualType ElemType = Context.getBaseElementType(Type).getNonReferenceType();
VarDecl *SrcVD =
- buildVarDecl(*this, DE->getLocStart(), ElemType.getUnqualifiedType(),
+ buildVarDecl(*this, DE->getBeginLoc(), ElemType.getUnqualifiedType(),
".copyin.src", VD->hasAttrs() ? &VD->getAttrs() : nullptr);
DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(
*this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc());
VarDecl *DstVD =
- buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst",
+ buildVarDecl(*this, DE->getBeginLoc(), ElemType, ".copyin.dst",
VD->hasAttrs() ? &VD->getAttrs() : nullptr);
DeclRefExpr *PseudoDstExpr =
buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc());
@@ -11310,7 +11955,7 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
if (AssignmentOp.isInvalid())
continue;
AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(),
- /*DiscardedValue=*/true);
+ /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
@@ -11341,8 +11986,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
- /*AllowArraySection=*/false);
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -11408,19 +12052,19 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Type = Context.getBaseElementType(Type.getNonReferenceType())
.getUnqualifiedType();
VarDecl *SrcVD =
- buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.src",
+ buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.src",
D->hasAttrs() ? &D->getAttrs() : nullptr);
DeclRefExpr *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, ELoc);
VarDecl *DstVD =
- buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.dst",
+ buildVarDecl(*this, RefExpr->getBeginLoc(), Type, ".copyprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
DeclRefExpr *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
ExprResult AssignmentOp = BuildBinOp(
DSAStack->getCurScope(), ELoc, BO_Assign, PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
continue;
- AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), ELoc,
- /*DiscardedValue=*/true);
+ AssignmentOp =
+ ActOnFinishFullExpr(AssignmentOp.get(), ELoc, /*DiscardedValue*/ false);
if (AssignmentOp.isInvalid())
continue;
@@ -11477,8 +12121,9 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
- if (DepKind == OMPC_DEPEND_sink) {
- if (const Expr *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
+ if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
+ if (const Expr *OrderedCountExpr =
+ DSAStack->getParentOrderedRegionParam().first) {
TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
TotalDepCount.setIsUnsigned(/*Val=*/true);
}
@@ -11494,7 +12139,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
SourceLocation ELoc = RefExpr->getExprLoc();
Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
if (DepKind == OMPC_DEPEND_sink) {
- if (DSAStack->getParentOrderedRegionParam() &&
+ if (DSAStack->getParentOrderedRegionParam().first &&
DepCounter >= TotalDepCount) {
Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
continue;
@@ -11539,8 +12184,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
}
SourceLocation ELoc;
SourceRange ERange;
- auto Res = getPrivateItem(*this, LHS, ELoc, ERange,
- /*AllowArraySection=*/false);
+ auto Res = getPrivateItem(*this, LHS, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -11560,7 +12204,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
continue;
}
if (!CurContext->isDependentContext() &&
- DSAStack->getParentOrderedRegionParam() &&
+ DSAStack->getParentOrderedRegionParam().first &&
DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
const ValueDecl *VD =
DSAStack->getParentLoopControlVariable(DepCounter.getZExtValue());
@@ -11598,7 +12242,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
TotalDepCount > VarList.size() &&
- DSAStack->getParentOrderedRegionParam() &&
+ DSAStack->getParentOrderedRegionParam().first &&
DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
<< 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
@@ -11608,7 +12252,8 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
return nullptr;
auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepKind, DepLoc, ColonLoc, Vars);
+ DepKind, DepLoc, ColonLoc, Vars,
+ TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
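
For reference, the doacross dependences validated here come from stand-alone 'ordered' directives inside a loop marked ordered(n); a minimal sketch:

void wavefront(int n, double *a) {
  #pragma omp parallel for ordered(1)
  for (int i = 1; i < n; ++i) {
    // Wait until iteration i - 1 has signalled completion.
    #pragma omp ordered depend(sink : i - 1)
    a[i] += a[i - 1];
    // Signal that iteration i is done.
    #pragma omp ordered depend(source)
  }
}
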
@@ -11680,9 +12325,11 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// If there is a lower bound that does not evaluates to zero, we are not
// covering the whole dimension.
if (LowerBound) {
- llvm::APSInt ConstLowerBound;
- if (!LowerBound->EvaluateAsInt(ConstLowerBound, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!LowerBound->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+
+ llvm::APSInt ConstLowerBound = Result.Val.getInt();
if (ConstLowerBound.getSExtValue())
return true;
}
@@ -11702,10 +12349,11 @@ static bool checkArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
if (!CATy)
return false;
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return CATy->getSize().getSExtValue() != ConstLength.getSExtValue();
}
@@ -11736,10 +12384,11 @@ static bool checkArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
}
// Check if the length evaluates to 1.
- llvm::APSInt ConstLength;
- if (!Length->EvaluateAsInt(ConstLength, SemaRef.getASTContext()))
+ Expr::EvalResult Result;
+ if (!Length->EvaluateAsInt(Result, SemaRef.getASTContext()))
return false; // Can't get the integer value as a constant.
+ llvm::APSInt ConstLength = Result.Val.getInt();
return ConstLength.getSExtValue() != 1;
}
@@ -11896,6 +12545,19 @@ static const Expr *checkMapClauseExpressionBase(
E->getType()))
AllowWholeSizeArraySection = false;
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
+ Expr::EvalResult Result;
+ if (CurE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext())) {
+ if (!Result.Val.getInt().isNullValue()) {
+ SemaRef.Diag(CurE->getIdx()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(CurE->getIdx()->getExprLoc(),
+ diag::note_omp_invalid_subscript_on_this_ptr_map);
+ }
+ }
+ RelevantExpr = TE;
+ }
+
// Record the component - we don't have any declaration associated.
CurComponents.emplace_back(CurE, nullptr);
} else if (auto *CurE = dyn_cast<OMPArraySectionExpr>(E)) {
@@ -11942,6 +12604,30 @@ static const Expr *checkMapClauseExpressionBase(
return nullptr;
}
+ if (const auto *TE = dyn_cast<CXXThisExpr>(E)) {
+ Expr::EvalResult ResultR;
+ Expr::EvalResult ResultL;
+ if (CurE->getLength()->EvaluateAsInt(ResultR,
+ SemaRef.getASTContext())) {
+ if (!ResultR.Val.getInt().isOneValue()) {
+ SemaRef.Diag(CurE->getLength()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(CurE->getLength()->getExprLoc(),
+ diag::note_omp_invalid_length_on_this_ptr_mapping);
+ }
+ }
+ if (CurE->getLowerBound() && CurE->getLowerBound()->EvaluateAsInt(
+ ResultL, SemaRef.getASTContext())) {
+ if (!ResultL.Val.getInt().isNullValue()) {
+ SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
+ diag::err_omp_invalid_map_this_expr);
+ SemaRef.Diag(CurE->getLowerBound()->getExprLoc(),
+ diag::note_omp_invalid_lower_bound_on_this_ptr_mapping);
+ }
+ }
+ RelevantExpr = TE;
+ }
+
// Record the component - we don't have any declaration associated.
CurComponents.emplace_back(CurE, nullptr);
} else {
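For illustration (a hypothetical test sketch, not taken from this change), the CXXThisExpr checks above restrict maps whose base is 'this' to the whole object, i.e. a zero lower bound and a length of one:

    struct S {
      int a;
      void f() {
        // Accepted: lower bound 0, length 1.
        #pragma omp target map(this[0:1])
        { a = 1; }
        // Diagnosed with err_omp_invalid_map_this_expr: length is not 1.
        #pragma omp target map(this[0:2])
        { a = 1; }
      }
    };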
@@ -12129,6 +12815,26 @@ static bool checkMapConflicts(
// An expression is a subset of the other.
if (CurrentRegionOnly && (CI == CE || SI == SE)) {
if (CKind == OMPC_map) {
+ if (CI != CE || SI != SE) {
+ // Allow constructs like this: map(s, s.ptr[0:1]), where s.ptr is
+ // a pointer.
+ auto Begin =
+ CI != CE ? CurComponents.begin() : StackComponents.begin();
+ auto End = CI != CE ? CurComponents.end() : StackComponents.end();
+ auto It = Begin;
+ while (It != End && !It->getAssociatedDeclaration())
+ std::advance(It, 1);
+ assert(It != End &&
+ "Expected at least one component with the declaration.");
+ if (It != Begin && It->getAssociatedDeclaration()
+ ->getType()
+ .getCanonicalType()
+ ->isAnyPointerType()) {
+ IsEnclosedByDataEnvironmentExpr = false;
+ EnclosingExpr = nullptr;
+ return false;
+ }
+ }
SemaRef.Diag(ELoc, diag::err_omp_map_shared_storage) << ERange;
} else {
assert(CKind == OMPC_to || CKind == OMPC_from);
@@ -12259,6 +12965,18 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
assert(!CurComponents.empty() &&
"Invalid mappable expression information.");
+ if (const auto *TE = dyn_cast<CXXThisExpr>(BE)) {
+ // Store the "this" pointer's class type in DSAStackTy for future checking
+ DSAS->addMappedClassesQualTypes(TE->getType());
+ // Skip restriction checking for variable or field declarations
+ MVLI.ProcessedVarList.push_back(RE);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().append(CurComponents.begin(),
+ CurComponents.end());
+ MVLI.VarBaseDeclarations.push_back(nullptr);
+ continue;
+ }
+
// For the following checks, we rely on the base declaration which is
// expected to be associated with the last component. The declaration is
// expected to be a variable or a field (if 'this' is being mapped).
@@ -12388,7 +13106,8 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
}
OMPClause *
-Sema::ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
+Sema::ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
@@ -12397,12 +13116,31 @@ Sema::ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, StartLoc,
MapType, IsMapTypeImplicit);
+ OpenMPMapModifierKind Modifiers[] = { OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown };
+ SourceLocation ModifiersLoc[OMPMapClause::NumberOfModifiers];
+
+ // Process map-type-modifiers, flag errors for duplicate modifiers.
+ unsigned Count = 0;
+ for (unsigned I = 0, E = MapTypeModifiers.size(); I < E; ++I) {
+ if (MapTypeModifiers[I] != OMPC_MAP_MODIFIER_unknown &&
+ llvm::find(Modifiers, MapTypeModifiers[I]) != std::end(Modifiers)) {
+ Diag(MapTypeModifiersLoc[I], diag::err_omp_duplicate_map_type_modifier);
+ continue;
+ }
+ assert(Count < OMPMapClause::NumberOfModifiers &&
+ "Modifiers exceed the allowed number of map type modifiers");
+ Modifiers[Count] = MapTypeModifiers[I];
+ ModifiersLoc[Count] = MapTypeModifiersLoc[I];
+ ++Count;
+ }
+
// We need to produce a map clause even if we don't have variables so that
// other diagnostics related to non-existing map clauses are accurate.
return OMPMapClause::Create(Context, StartLoc, LParenLoc, EndLoc,
MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
- MVLI.VarComponents, MapTypeModifier, MapType,
- IsMapTypeImplicit, MapLoc);
+ MVLI.VarComponents, Modifiers, ModifiersLoc,
+ MapType, IsMapTypeImplicit, MapLoc);
}
QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
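As an illustrative sketch (hypothetical code, not part of the change), the modifier loop above accepts at most OMPMapClause::NumberOfModifiers distinct map-type-modifiers and reports repeats, e.g. with the 'always' modifier:

    void g(int *p) {
      // Accepted: a single occurrence of the modifier.
      #pragma omp target map(always, tofrom: p[0:8])
      { p[0] = 1; }
      // Diagnosed with err_omp_duplicate_map_type_modifier.
      #pragma omp target map(always, always, tofrom: p[0:8])
      { p[0] = 1; }
    }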
@@ -12559,6 +13297,11 @@ void Sema::ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D) {
DRD->addDecl(OmpInParm);
DRD->addDecl(OmpOutParm);
}
+ Expr *InE =
+ ::buildDeclRefExpr(*this, OmpInParm, ReductionType, D->getLocation());
+ Expr *OutE =
+ ::buildDeclRefExpr(*this, OmpOutParm, ReductionType, D->getLocation());
+ DRD->setCombinerData(InE, OutE);
}
void Sema::ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner) {
@@ -12614,6 +13357,11 @@ VarDecl *Sema::ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D) {
DRD->addDecl(OmpPrivParm);
DRD->addDecl(OmpOrigParm);
}
+ Expr *OrigE =
+ ::buildDeclRefExpr(*this, OmpOrigParm, ReductionType, D->getLocation());
+ Expr *PrivE =
+ ::buildDeclRefExpr(*this, OmpPrivParm, ReductionType, D->getLocation());
+ DRD->setInitializerData(OrigE, PrivE);
return OmpPrivParm;
}
@@ -12785,7 +13533,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
if (!ChunkSize->isValueDependent() && !ChunkSize->isTypeDependent() &&
!ChunkSize->isInstantiationDependent() &&
!ChunkSize->containsUnexpandedParameterPack()) {
- SourceLocation ChunkSizeLoc = ChunkSize->getLocStart();
+ SourceLocation ChunkSizeLoc = ChunkSize->getBeginLoc();
ExprResult Val =
PerformOpenMPImplicitIntegerConversion(ChunkSizeLoc, ChunkSize);
if (Val.isInvalid())
@@ -12861,19 +13609,14 @@ bool Sema::ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc) {
Diag(Loc, diag::err_omp_region_not_file_context);
return false;
}
- if (IsInOpenMPDeclareTargetContext) {
- Diag(Loc, diag::err_omp_enclosed_declare_target);
- return false;
- }
-
- IsInOpenMPDeclareTargetContext = true;
+ ++DeclareTargetNestingLevel;
return true;
}
void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
- assert(IsInOpenMPDeclareTargetContext &&
+ assert(DeclareTargetNestingLevel > 0 &&
"Unexpected ActOnFinishOpenMPDeclareTargetDirective");
- IsInOpenMPDeclareTargetContext = false;
+ --DeclareTargetNestingLevel;
}
void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
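One consequence of replacing the IsInOpenMPDeclareTargetContext flag with the DeclareTargetNestingLevel counter above (hypothetical example, not from the change) is that nested regions are now counted rather than rejected with err_omp_enclosed_declare_target:

    #pragma omp declare target
    int outer;
    #pragma omp declare target   // nesting level becomes 2 instead of an error
    int inner;
    #pragma omp end declare target
    #pragma omp end declare target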
@@ -12904,16 +13647,20 @@ void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
}
NamedDecl *ND = Lookup.getAsSingle<NamedDecl>();
- if (isa<VarDecl>(ND) || isa<FunctionDecl>(ND)) {
+ if (isa<VarDecl>(ND) || isa<FunctionDecl>(ND) ||
+ isa<FunctionTemplateDecl>(ND)) {
if (!SameDirectiveDecls.insert(cast<NamedDecl>(ND->getCanonicalDecl())))
Diag(Id.getLoc(), diag::err_omp_declare_target_multiple) << Id.getName();
- if (!ND->hasAttr<OMPDeclareTargetDeclAttr>()) {
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
+ cast<ValueDecl>(ND));
+ if (!Res) {
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT);
ND->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Id.getLoc());
- } else if (ND->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() != MT) {
+ } else if (*Res != MT) {
Diag(Id.getLoc(), diag::err_omp_declare_target_to_and_link)
<< Id.getName();
}
@@ -12924,79 +13671,13 @@ void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
Sema &SemaRef, Decl *D) {
- if (!D)
+ if (!D || !isa<VarDecl>(D))
return;
- const Decl *LD = nullptr;
- if (isa<TagDecl>(D)) {
- LD = cast<TagDecl>(D)->getDefinition();
- } else if (isa<VarDecl>(D)) {
- LD = cast<VarDecl>(D)->getDefinition();
-
- // If this is an implicit variable that is legal and we do not need to do
- // anything.
- if (cast<VarDecl>(D)->isImplicit()) {
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
- D->addAttr(A);
- if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
- return;
- }
- } else if (const auto *F = dyn_cast<FunctionDecl>(D)) {
- const FunctionDecl *FD = nullptr;
- if (cast<FunctionDecl>(D)->hasBody(FD)) {
- LD = FD;
- // If the definition is associated with the current declaration in the
- // target region (it can be e.g. a lambda) that is legal and we do not
- // need to do anything else.
- if (LD == D) {
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
- D->addAttr(A);
- if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
- return;
- }
- } else if (F->isFunctionTemplateSpecialization() &&
- F->getTemplateSpecializationKind() ==
- TSK_ImplicitInstantiation) {
- // Check if the function is implicitly instantiated from the template
- // defined in the declare target region.
- const FunctionTemplateDecl *FTD = F->getPrimaryTemplate();
- if (FTD && FTD->hasAttr<OMPDeclareTargetDeclAttr>())
- return;
- }
- }
- if (!LD)
- LD = D;
- if (LD && !LD->hasAttr<OMPDeclareTargetDeclAttr>() &&
- ((isa<VarDecl>(LD) && !isa<ParmVarDecl>(LD)) || isa<FunctionDecl>(LD))) {
- // Outlined declaration is not declared target.
- if (!isa<FunctionDecl>(LD)) {
- if (LD->isOutOfLine()) {
- SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
- SemaRef.Diag(SL, diag::note_used_here) << SR;
- } else {
- const DeclContext *DC = LD->getDeclContext();
- while (DC &&
- (!isa<FunctionDecl>(DC) ||
- !cast<FunctionDecl>(DC)->hasAttr<OMPDeclareTargetDeclAttr>()))
- DC = DC->getParent();
- if (DC)
- return;
-
- // Is not declared in target context.
- SemaRef.Diag(LD->getLocation(), diag::warn_omp_not_in_target_context);
- SemaRef.Diag(SL, diag::note_used_here) << SR;
- }
- }
- // Mark decl as declared target to prevent further diagnostic.
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- SemaRef.Context, OMPDeclareTargetDeclAttr::MT_To);
- D->addAttr(A);
- if (ASTMutationListener *ML = SemaRef.Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
- }
+ auto *VD = cast<VarDecl>(D);
+ if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ return;
+ SemaRef.Diag(VD->getLocation(), diag::warn_omp_not_in_target_context);
+ SemaRef.Diag(SL, diag::note_used_here) << SR;
}
static bool checkValueDeclInTarget(SourceLocation SL, SourceRange SR,
@@ -13012,10 +13693,11 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
if (!D || D->isInvalidDecl())
return;
SourceRange SR = E ? E->getSourceRange() : D->getSourceRange();
- SourceLocation SL = E ? E->getLocStart() : D->getLocation();
+ SourceLocation SL = E ? E->getBeginLoc() : D->getLocation();
if (auto *VD = dyn_cast<VarDecl>(D)) {
// Only global variables can be marked as declare target.
- if (VD->isLocalVarDeclOrParm())
+ if (!VD->isFileVarDecl() && !VD->isStaticLocal() &&
+ !VD->isStaticDataMember())
return;
// 2.10.6: threadprivate variable cannot appear in a declare target
// directive.
@@ -13025,56 +13707,39 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
}
}
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ D = FTD->getTemplatedDecl();
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD);
+ if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
+ assert(IdLoc.isValid() && "Source location is expected");
+ Diag(IdLoc, diag::err_omp_function_in_link_clause);
+ Diag(FD->getLocation(), diag::note_defined_here) << FD;
+ return;
+ }
+ }
if (auto *VD = dyn_cast<ValueDecl>(D)) {
// Any problem with a variable declared with an incomplete type will be
// reported as normal, so no need to check it here.
if ((E || !VD->getType()->isIncompleteType()) &&
- !checkValueDeclInTarget(SL, SR, *this, DSAStack, VD)) {
- // Mark decl as declared target to prevent further diagnostic.
- if (isa<VarDecl>(VD) || isa<FunctionDecl>(VD) ||
- isa<FunctionTemplateDecl>(VD)) {
+ !checkValueDeclInTarget(SL, SR, *this, DSAStack, VD))
+ return;
+ if (!E && !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+ // Checking declaration inside declare target region.
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D) ||
+ isa<FunctionTemplateDecl>(D)) {
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
Context, OMPDeclareTargetDeclAttr::MT_To);
- VD->addAttr(A);
+ D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(VD, A);
+ ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
}
return;
}
}
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->hasAttr<OMPDeclareTargetDeclAttr>() &&
- (FD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
- OMPDeclareTargetDeclAttr::MT_Link)) {
- assert(IdLoc.isValid() && "Source location is expected");
- Diag(IdLoc, diag::err_omp_function_in_link_clause);
- Diag(FD->getLocation(), diag::note_defined_here) << FD;
- return;
- }
- }
- if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D)) {
- if (FTD->hasAttr<OMPDeclareTargetDeclAttr>() &&
- (FTD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
- OMPDeclareTargetDeclAttr::MT_Link)) {
- assert(IdLoc.isValid() && "Source location is expected");
- Diag(IdLoc, diag::err_omp_function_in_link_clause);
- Diag(FTD->getLocation(), diag::note_defined_here) << FTD;
- return;
- }
- }
- if (!E) {
- // Checking declaration inside declare target region.
- if (!D->hasAttr<OMPDeclareTargetDeclAttr>() &&
- (isa<VarDecl>(D) || isa<FunctionDecl>(D) ||
- isa<FunctionTemplateDecl>(D))) {
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, OMPDeclareTargetDeclAttr::MT_To);
- D->addAttr(A);
- if (ASTMutationListener *ML = Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
- }
+ if (!E)
return;
- }
checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D);
}
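A minimal sketch (hypothetical, not taken from the change) of the err_omp_function_in_link_clause path added in checkDeclIsAllowedInOpenMPTarget, which fires when a function is named in a 'link' clause:

    void foo();
    // Diagnosed with err_omp_function_in_link_clause: functions may be
    // declared 'to' but not 'link'.
    #pragma omp declare target link(foo)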
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 08af485ef4c7..52be0598fbc0 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -63,8 +63,8 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
return ExprError();
if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
S.ResolveExceptionSpec(Loc, FPT);
- DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
- VK_LValue, Loc, LocInfo);
+ DeclRefExpr *DRE = new (S.Context)
+ DeclRefExpr(S.Context, Fn, false, Fn->getType(), VK_LValue, Loc, LocInfo);
if (HadMultipleCandidates)
DRE->setHadMultipleCandidates(true);
@@ -1041,6 +1041,36 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
}
}
+ // C++ [temp.friend]p1:
+ // For a friend function declaration that is not a template declaration:
+ // -- if the name of the friend is a qualified or unqualified template-id,
+ // [...], otherwise
+ // -- if the name of the friend is a qualified-id and a matching
+ // non-template function is found in the specified class or namespace,
+ // the friend declaration refers to that function, otherwise,
+ // -- if the name of the friend is a qualified-id and a matching function
+ // template is found in the specified class or namespace, the friend
+ // declaration refers to the deduced specialization of that function
+ // template, otherwise
+ // -- the name shall be an unqualified-id [...]
+ // If we get here for a qualified friend declaration, we've just reached the
+ // third bullet. If the type of the friend is dependent, skip this lookup
+ // until instantiation.
+ if (New->getFriendObjectKind() && New->getQualifier() &&
+ !New->getDependentSpecializationInfo() &&
+ !New->getType()->isDependentType()) {
+ LookupResult TemplateSpecResult(LookupResult::Temporary, Old);
+ TemplateSpecResult.addAllDecls(Old);
+ if (CheckFunctionTemplateSpecialization(New, nullptr, TemplateSpecResult,
+ /*QualifiedFriend*/true)) {
+ New->setInvalidDecl();
+ return Ovl_Overload;
+ }
+
+ Match = TemplateSpecResult.getAsSingle<FunctionDecl>();
+ return Ovl_Match;
+ }
+
return Ovl_Overload;
}
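An illustrative case (hypothetical, not from the change) for the qualified-friend handling added above: a non-template friend declaration whose qualified name matches only a function template in the named scope is resolved, per [temp.friend]p1, to the deduced specialization:

    namespace N { template <typename T> void f(T); }
    struct A {
      friend void N::f(int);   // refers to the specialization N::f<int>
    };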
@@ -1105,7 +1135,8 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
(!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
false, TPL_TemplateMatch) ||
- OldType->getReturnType() != NewType->getReturnType()))
+ !Context.hasSameType(Old->getDeclaredReturnType(),
+ New->getDeclaredReturnType())))
return true;
// If the function is a class member, its signature includes the
@@ -1141,8 +1172,9 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
// function yet (because we haven't yet resolved whether this is a static
// or non-static member function). Add it now, on the assumption that this
// is a redeclaration of OldMethod.
- unsigned OldQuals = OldMethod->getTypeQualifiers();
- unsigned NewQuals = NewMethod->getTypeQualifiers();
+ // FIXME: OpenCL: Need to consider address spaces
+ unsigned OldQuals = OldMethod->getTypeQualifiers().getCVRUQualifiers();
+ unsigned NewQuals = NewMethod->getTypeQualifiers().getCVRUQualifiers();
if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
!isa<CXXConstructorDecl>(NewMethod))
NewQuals |= Qualifiers::Const;
@@ -1263,7 +1295,7 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
= S.Context.getCanonicalType(ToType).getUnqualifiedType();
if (Constructor->isCopyConstructor() &&
(FromCanon == ToCanon ||
- S.IsDerivedFrom(From->getLocStart(), FromCanon, ToCanon))) {
+ S.IsDerivedFrom(From->getBeginLoc(), FromCanon, ToCanon))) {
// Turn this into a "standard" conversion sequence, so that it
// gets ranked with standard conversion sequences.
DeclAccessPair Found = ICS.UserDefined.FoundConversionFunction;
@@ -1355,7 +1387,7 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
QualType FromType = From->getType();
if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
(S.Context.hasSameUnqualifiedType(FromType, ToType) ||
- S.IsDerivedFrom(From->getLocStart(), FromType, ToType))) {
+ S.IsDerivedFrom(From->getBeginLoc(), FromType, ToType))) {
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
ICS.Standard.setFromType(FromType);
@@ -1417,9 +1449,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
bool AllowObjCWritebackConversion
= getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
- if (getLangOpts().ObjC1)
- CheckObjCBridgeRelatedConversions(From->getLocStart(),
- ToType, From->getType(), From);
+ if (getLangOpts().ObjC)
+ CheckObjCBridgeRelatedConversions(From->getBeginLoc(), ToType,
+ From->getType(), From);
ICS = ::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
AllowExplicit,
@@ -2011,7 +2043,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
- isCompleteType(From->getLocStart(), FromType))
+ isCompleteType(From->getBeginLoc(), FromType))
return Context.hasSameUnqualifiedType(
ToType, FromEnumType->getDecl()->getPromotionType());
@@ -2353,10 +2385,10 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
//
// Note that we do not check for ambiguity or inaccessibility
// here. That is handled by CheckPointerConversion.
- if (getLangOpts().CPlusPlus &&
- FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ if (getLangOpts().CPlusPlus && FromPointeeType->isRecordType() &&
+ ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
- IsDerivedFrom(From->getLocStart(), FromPointeeType, ToPointeeType)) {
+ IsDerivedFrom(From->getBeginLoc(), FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
@@ -2394,7 +2426,7 @@ static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
- if (!getLangOpts().ObjC1)
+ if (!getLangOpts().ObjC)
return false;
// The set of qualifiers on the type we're converting from.
@@ -2822,10 +2854,9 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
return;
}
- unsigned FromQuals = FromFunction->getTypeQuals(),
- ToQuals = ToFunction->getTypeQuals();
- if (FromQuals != ToQuals) {
- PDiag << ft_qualifer_mismatch << ToQuals << FromQuals;
+ if (FromFunction->getTypeQuals() != ToFunction->getTypeQuals()) {
+ PDiag << ft_qualifer_mismatch << ToFunction->getTypeQuals()
+ << FromFunction->getTypeQuals();
return;
}
@@ -2983,7 +3014,7 @@ bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
QualType ToClass(ToTypePtr->getClass(), 0);
if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
- IsDerivedFrom(From->getLocStart(), ToClass, FromClass)) {
+ IsDerivedFrom(From->getBeginLoc(), ToClass, FromClass)) {
ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
ToClass.getTypePtr());
return true;
@@ -3027,7 +3058,7 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
bool DerivationOkay =
- IsDerivedFrom(From->getLocStart(), ToClass, FromClass, Paths);
+ IsDerivedFrom(From->getBeginLoc(), ToClass, FromClass, Paths);
assert(DerivationOkay &&
"Should not have been called if derivation isn't OK.");
(void)DerivationOkay;
@@ -3242,13 +3273,12 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
OverloadCandidateSet::iterator Best;
switch (auto Result =
- CandidateSet.BestViableFunction(S, From->getLocStart(),
- Best)) {
+ CandidateSet.BestViableFunction(S, From->getBeginLoc(), Best)) {
case OR_Deleted:
case OR_Success: {
// Record the standard conversion we used and the conversion function.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
- QualType ThisType = Constructor->getThisType(S.Context);
+ QualType ThisType = Constructor->getThisType();
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
User.HadMultipleCandidates = HadMultipleCandidates;
@@ -3308,7 +3338,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// the parentheses of the initializer.
if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
(From->getType()->getAs<RecordType>() &&
- S.IsDerivedFrom(From->getLocStart(), From->getType(), ToType)))
+ S.IsDerivedFrom(From->getBeginLoc(), From->getType(), ToType)))
ConstructorsOnly = true;
if (!S.isCompleteType(From->getExprLoc(), ToType)) {
@@ -3376,10 +3406,10 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Enumerate conversion functions, if we're allowed to.
if (ConstructorsOnly || isa<InitListExpr>(From)) {
- } else if (!S.isCompleteType(From->getLocStart(), From->getType())) {
+ } else if (!S.isCompleteType(From->getBeginLoc(), From->getType())) {
// No conversion functions from incomplete types.
- } else if (const RecordType *FromRecordType
- = From->getType()->getAs<RecordType>()) {
+ } else if (const RecordType *FromRecordType =
+ From->getType()->getAs<RecordType>()) {
if (CXXRecordDecl *FromRecordDecl
= dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
// Add all of the conversion functions as candidates.
@@ -3416,8 +3446,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
- switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(),
- Best)) {
+ switch (auto Result =
+ CandidateSet.BestViableFunction(S, From->getBeginLoc(), Best)) {
case OR_Success:
case OR_Deleted:
// Record the standard conversion we used and the conversion function.
@@ -3429,7 +3459,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// sequence converts the source type to the type required by
// the argument of the constructor.
//
- QualType ThisType = Constructor->getThisType(S.Context);
+ QualType ThisType = Constructor->getThisType();
if (isa<InitListExpr>(From)) {
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
@@ -3496,13 +3526,13 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
CandidateSet, false, false);
if (OvResult == OR_Ambiguous)
- Diag(From->getLocStart(), diag::err_typecheck_ambiguous_condition)
+ Diag(From->getBeginLoc(), diag::err_typecheck_ambiguous_condition)
<< From->getType() << ToType << From->getSourceRange();
else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) {
- if (!RequireCompleteType(From->getLocStart(), ToType,
+ if (!RequireCompleteType(From->getBeginLoc(), ToType,
diag::err_typecheck_nonviable_condition_incomplete,
From->getType(), From->getSourceRange()))
- Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition)
+ Diag(From->getBeginLoc(), diag::err_typecheck_nonviable_condition)
<< false << From->getType() << From->getSourceRange() << ToType;
} else
return false;
@@ -3516,7 +3546,7 @@ Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
static ImplicitConversionSequence::CompareKind
compareConversionFunctions(Sema &S, FunctionDecl *Function1,
FunctionDecl *Function2) {
- if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11)
+ if (!S.getLangOpts().ObjC || !S.getLangOpts().CPlusPlus11)
return ImplicitConversionSequence::Indistinguishable;
// Objective-C++:
@@ -3900,6 +3930,31 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
S.Context.getTypeSize(SCS1.getToType(2)))
return ImplicitConversionSequence::Better;
+ // Prefer a compatible vector conversion over a lax vector conversion
+ // For example:
+ //
+ // typedef float __v4sf __attribute__((__vector_size__(16)));
+ // void f(vector float);
+ // void f(vector signed int);
+ // int main() {
+ // __v4sf a;
+ // f(a);
+ // }
+ // Here, we'd like to choose f(vector float) and not
+ // report an ambiguous call error
+ if (SCS1.Second == ICK_Vector_Conversion &&
+ SCS2.Second == ICK_Vector_Conversion) {
+ bool SCS1IsCompatibleVectorConversion = S.Context.areCompatibleVectorTypes(
+ SCS1.getFromType(), SCS1.getToType(2));
+ bool SCS2IsCompatibleVectorConversion = S.Context.areCompatibleVectorTypes(
+ SCS2.getFromType(), SCS2.getToType(2));
+
+ if (SCS1IsCompatibleVectorConversion != SCS2IsCompatibleVectorConversion)
+ return SCS1IsCompatibleVectorConversion
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
@@ -4750,7 +4805,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// We need a complete type for what follows. Incomplete types can never be
// initialized from init lists.
- if (!S.isCompleteType(From->getLocStart(), ToType))
+ if (!S.isCompleteType(From->getBeginLoc(), ToType))
return Result;
// Per DR1467:
@@ -4767,7 +4822,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
if (ToType->isRecordType()) {
QualType InitType = From->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
- S.IsDerivedFrom(From->getLocStart(), InitType, ToType))
+ S.IsDerivedFrom(From->getBeginLoc(), InitType, ToType))
return TryCopyInitialization(S, From->getInit(0), ToType,
SuppressUserConversions,
InOverloadResolution,
@@ -4823,10 +4878,9 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
break;
}
// Otherwise, look for the worst conversion.
- if (Result.isBad() ||
- CompareImplicitConversionSequences(S, From->getLocStart(), ICS,
- Result) ==
- ImplicitConversionSequence::Worse)
+ if (Result.isBad() || CompareImplicitConversionSequences(
+ S, From->getBeginLoc(), ICS, Result) ==
+ ImplicitConversionSequence::Worse)
Result = ICS;
}
@@ -4920,12 +4974,12 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
bool dummy1 = false;
bool dummy2 = false;
bool dummy3 = false;
- Sema::ReferenceCompareResult RefRelationship
- = S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1,
+ Sema::ReferenceCompareResult RefRelationship =
+ S.CompareReferenceRelationship(From->getBeginLoc(), T1, T2, dummy1,
dummy2, dummy3);
if (RefRelationship >= Sema::Ref_Related) {
- return TryReferenceInit(S, Init, ToType, /*FIXME*/From->getLocStart(),
+ return TryReferenceInit(S, Init, ToType, /*FIXME*/ From->getBeginLoc(),
SuppressUserConversions,
/*AllowExplicit=*/false);
}
@@ -5006,9 +5060,8 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
if (ToType->isReferenceType())
return TryReferenceInit(S, From, ToType,
- /*FIXME:*/From->getLocStart(),
- SuppressUserConversions,
- AllowExplicit);
+ /*FIXME:*/ From->getBeginLoc(),
+ SuppressUserConversions, AllowExplicit);
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
@@ -5042,9 +5095,15 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
// [class.dtor]p2: A destructor can be invoked for a const, volatile or
// const volatile object.
- unsigned Quals = isa<CXXDestructorDecl>(Method) ?
- Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers();
- QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals);
+ Qualifiers Quals;
+ if (isa<CXXDestructorDecl>(Method)) {
+ Quals.addConst();
+ Quals.addVolatile();
+ } else {
+ Quals = Method->getTypeQualifiers();
+ }
+
+ QualType ImplicitParamType = S.Context.getQualifiedType(ClassType, Quals);
// Set up the conversion sequence as a "bad" conversion, to allow us
// to exit early.
@@ -5110,7 +5169,7 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
break;
case RQ_LValue:
- if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
+ if (!FromClassification.isLValue() && !Quals.hasOnlyConst()) {
// non-const lvalue reference cannot bind to an rvalue
ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
ImplicitParamType);
@@ -5154,12 +5213,12 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
QualType ImplicitParamRecordType =
- Method->getThisType(Context)->getAs<PointerType>()->getPointeeType();
+ Method->getThisType()->getAs<PointerType>()->getPointeeType();
Expr::Classification FromClassification;
if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
FromRecordType = PT->getPointeeType();
- DestType = Method->getThisType(Context);
+ DestType = Method->getThisType();
FromClassification = Expr::Classification::makeSimpleLValue();
} else {
FromRecordType = From->getType();
@@ -5177,7 +5236,7 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
// Note that we always use the true parent context when performing
// the actual argument initialization.
ImplicitConversionSequence ICS = TryObjectArgumentInitialization(
- *this, From->getLocStart(), From->getType(), FromClassification, Method,
+ *this, From->getBeginLoc(), From->getType(), FromClassification, Method,
Method->getParent());
if (ICS.isBad()) {
switch (ICS.Bad.Kind) {
@@ -5186,10 +5245,9 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
Qualifiers ToQs = DestType.getQualifiers();
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
if (CVR) {
- Diag(From->getLocStart(),
- diag::err_member_function_call_bad_cvr)
- << Method->getDeclName() << FromRecordType << (CVR - 1)
- << From->getSourceRange();
+ Diag(From->getBeginLoc(), diag::err_member_function_call_bad_cvr)
+ << Method->getDeclName() << FromRecordType << (CVR - 1)
+ << From->getSourceRange();
Diag(Method->getLocation(), diag::note_previous_decl)
<< Method->getDeclName();
return ExprError();
@@ -5201,9 +5259,9 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
case BadConversionSequence::rvalue_ref_to_lvalue: {
bool IsRValueQualified =
Method->getRefQualifier() == RefQualifierKind::RQ_RValue;
- Diag(From->getLocStart(), diag::err_member_function_call_bad_ref)
- << Method->getDeclName() << FromClassification.isRValue()
- << IsRValueQualified;
+ Diag(From->getBeginLoc(), diag::err_member_function_call_bad_ref)
+ << Method->getDeclName() << FromClassification.isRValue()
+ << IsRValueQualified;
Diag(Method->getLocation(), diag::note_previous_decl)
<< Method->getDeclName();
return ExprError();
@@ -5214,9 +5272,9 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
break;
}
- return Diag(From->getLocStart(),
- diag::err_member_function_call_bad_type)
- << ImplicitParamRecordType << FromRecordType << From->getSourceRange();
+ return Diag(From->getBeginLoc(), diag::err_member_function_call_bad_type)
+ << ImplicitParamRecordType << FromRecordType
+ << From->getSourceRange();
}
if (ICS.Standard.Second == ICK_Derived_To_Base) {
@@ -5227,9 +5285,14 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
From = FromRes.get();
}
- if (!Context.hasSameType(From->getType(), DestType))
- From = ImpCastExprToType(From, DestType, CK_NoOp,
+ if (!Context.hasSameType(From->getType(), DestType)) {
+ if (From->getType().getAddressSpace() != DestType.getAddressSpace())
+ From = ImpCastExprToType(From, DestType, CK_AddressSpaceConversion,
From->getValueKind()).get();
+ else
+ From = ImpCastExprToType(From, DestType, CK_NoOp,
+ From->getValueKind()).get();
+ }
return From;
}
@@ -5257,9 +5320,8 @@ ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
- return Diag(From->getLocStart(),
- diag::err_typecheck_bool_condition)
- << From->getType() << From->getSourceRange();
+ return Diag(From->getBeginLoc(), diag::err_typecheck_bool_condition)
+ << From->getType() << From->getSourceRange();
return ExprError();
}
@@ -5372,9 +5434,9 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::BadConversion:
if (!S.DiagnoseMultipleUserDefinedConversion(From, T))
- return S.Diag(From->getLocStart(),
+ return S.Diag(From->getBeginLoc(),
diag::err_typecheck_converted_constant_expression)
- << From->getType() << From->getSourceRange() << T;
+ << From->getType() << From->getSourceRange() << T;
return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
@@ -5383,15 +5445,15 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// Check that we would only use permitted conversions.
if (!CheckConvertedConstantConversions(S, *SCS)) {
- return S.Diag(From->getLocStart(),
+ return S.Diag(From->getBeginLoc(),
diag::err_typecheck_converted_constant_expression_disallowed)
- << From->getType() << From->getSourceRange() << T;
+ << From->getType() << From->getSourceRange() << T;
}
// [...] and where the reference binding (if any) binds directly.
if (SCS->ReferenceBinding && !SCS->DirectBinding) {
- return S.Diag(From->getLocStart(),
+ return S.Diag(From->getBeginLoc(),
diag::err_typecheck_converted_constant_expression_indirect)
- << From->getType() << From->getSourceRange() << T;
+ << From->getType() << From->getSourceRange() << T;
}
ExprResult Result =
@@ -5414,14 +5476,14 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
break;
case NK_Constant_Narrowing:
- S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
- << CCE << /*Constant*/1
- << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
+ S.Diag(From->getBeginLoc(), diag::ext_cce_narrowing)
+ << CCE << /*Constant*/ 1
+ << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
break;
case NK_Type_Narrowing:
- S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
- << CCE << /*Constant*/0 << From->getType() << T;
+ S.Diag(From->getBeginLoc(), diag::ext_cce_narrowing)
+ << CCE << /*Constant*/ 0 << From->getType() << T;
break;
}
@@ -5448,7 +5510,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
- return Result;
+ return ConstantExpr::Create(S.Context, Result.get());
}
}
@@ -5457,8 +5519,8 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
else {
- S.Diag(From->getLocStart(), diag::err_expr_not_cce)
- << CCE << From->getSourceRange();
+ S.Diag(From->getBeginLoc(), diag::err_expr_not_cce)
+ << CCE << From->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
S.Diag(Notes[I].first, Notes[I].second);
}
@@ -5586,10 +5648,10 @@ diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
ConvTy.getAsStringInternal(TypeStr, SemaRef.getPrintingPolicy());
Converter.diagnoseExplicitConv(SemaRef, Loc, T, ConvTy)
- << FixItHint::CreateInsertion(From->getLocStart(),
+ << FixItHint::CreateInsertion(From->getBeginLoc(),
"static_cast<" + TypeStr + ">(")
<< FixItHint::CreateInsertion(
- SemaRef.getLocForEndOfToken(From->getLocEnd()), ")");
+ SemaRef.getLocForEndOfToken(From->getEndLoc()), ")");
Converter.noteExplicitConv(SemaRef, Conversion, ConvTy);
// If we aren't in a SFINAE context, build a call to the
@@ -5925,15 +5987,13 @@ static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
/// \param PartialOverloading true if we are performing "partial" overloading
/// based on an incomplete set of function arguments. This feature is used by
/// code completion.
-void
-Sema::AddOverloadCandidate(FunctionDecl *Function,
- DeclAccessPair FoundDecl,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet &CandidateSet,
- bool SuppressUserConversions,
- bool PartialOverloading,
- bool AllowExplicit,
- ConversionSequenceList EarlyConversions) {
+void Sema::AddOverloadCandidate(FunctionDecl *Function,
+ DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading, bool AllowExplicit,
+ ADLCallKind IsADLCandidate,
+ ConversionSequenceList EarlyConversions) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -5992,6 +6052,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.Function = Function;
Candidate.Viable = true;
Candidate.IsSurrogate = false;
+ Candidate.IsADLCandidate = IsADLCandidate;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
@@ -6009,7 +6070,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
- IsDerivedFrom(Args[0]->getLocStart(), Args[0]->getType(),
+ IsDerivedFrom(Args[0]->getBeginLoc(), Args[0]->getType(),
ClassType))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_illegal_constructor;
@@ -6215,24 +6276,6 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
return nullptr;
}
-// specific_attr_iterator iterates over enable_if attributes in reverse, and
-// enable_if is order-sensitive. As a result, we need to reverse things
-// sometimes. Size of 4 elements is arbitrary.
-static SmallVector<EnableIfAttr *, 4>
-getOrderedEnableIfAttrs(const FunctionDecl *Function) {
- SmallVector<EnableIfAttr *, 4> Result;
- if (!Function->hasAttrs())
- return Result;
-
- const auto &FuncAttrs = Function->getAttrs();
- for (Attr *Attr : FuncAttrs)
- if (auto *EnableIf = dyn_cast<EnableIfAttr>(Attr))
- Result.push_back(EnableIf);
-
- std::reverse(Result.begin(), Result.end());
- return Result;
-}
-
static bool
convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap,
@@ -6306,9 +6349,8 @@ convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis) {
- SmallVector<EnableIfAttr *, 4> EnableIfAttrs =
- getOrderedEnableIfAttrs(Function);
- if (EnableIfAttrs.empty())
+ auto EnableIfAttrs = Function->specific_attrs<EnableIfAttr>();
+ if (EnableIfAttrs.begin() == EnableIfAttrs.end())
return nullptr;
SFINAETrap Trap(*this);
@@ -6318,7 +6360,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
if (!convertArgsForAvailabilityChecks(
*this, Function, /*ThisArg=*/nullptr, Args, Trap,
/*MissingImplicitThis=*/true, DiscardedThis, ConvertedArgs))
- return EnableIfAttrs[0];
+ return *EnableIfAttrs.begin();
for (auto *EIA : EnableIfAttrs) {
APValue Result;
@@ -6427,7 +6469,12 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
if (Expr *E = Args[0]) {
// Use the explicit base to restrict the lookup:
ObjectType = E->getType();
- ObjectClassification = E->Classify(Context);
+ // Pointers in the object arguments are implicitly dereferenced, so we
+ // always classify them as l-values.
+ if (!ObjectType.isNull() && ObjectType->isPointerType())
+ ObjectClassification = Expr::Classification::makeSimpleLValue();
+ else
+ ObjectClassification = E->Classify(Context);
} // .. else there is an implicit base.
FunctionArgs = Args.slice(1);
}
@@ -6708,14 +6755,11 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
/// Add a C++ function template specialization as a candidate
/// in the candidate set, using template argument deduction to produce
/// an appropriate function template specialization.
-void
-Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
- DeclAccessPair FoundDecl,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet& CandidateSet,
- bool SuppressUserConversions,
- bool PartialOverloading) {
+void Sema::AddTemplateOverloadCandidate(
+ FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
+ bool PartialOverloading, ADLCallKind IsADLCandidate) {
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
@@ -6744,6 +6788,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.IsSurrogate = false;
+ Candidate.IsADLCandidate = IsADLCandidate;
// Ignore the object argument if there is one, since we don't have an object
// type.
Candidate.IgnoreObjectArgument =
@@ -6765,7 +6810,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
assert(Specialization && "Missing function template specialization?");
AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
SuppressUserConversions, PartialOverloading,
- /*AllowExplicit*/false, Conversions);
+ /*AllowExplicit*/ false, IsADLCandidate, Conversions);
}
/// Check that implicit conversion sequences can be formed for each argument
@@ -6966,15 +7011,15 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// lvalues/rvalues and the type. Fortunately, we can allocate this
// call on the stack and we don't need its arguments to be
// well-formed.
- DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(),
- VK_LValue, From->getLocStart());
+ DeclRefExpr ConversionRef(Context, Conversion, false, Conversion->getType(),
+ VK_LValue, From->getBeginLoc());
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
CK_FunctionToPointerDecay,
&ConversionRef, VK_RValue);
QualType ConversionType = Conversion->getConversionType();
- if (!isCompleteType(From->getLocStart(), ConversionType)) {
+ if (!isCompleteType(From->getBeginLoc(), ConversionType)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
@@ -6986,13 +7031,17 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
QualType CallResultType = ConversionType.getNonLValueExprType(Context);
- CallExpr Call(Context, &ConversionFn, None, CallResultType, VK,
- From->getLocStart());
+
+ llvm::AlignedCharArray<alignof(CallExpr), sizeof(CallExpr) + sizeof(Stmt *)>
+ Buffer;
+ CallExpr *TheTemporaryCall = CallExpr::CreateTemporary(
+ Buffer.buffer, &ConversionFn, CallResultType, VK, From->getBeginLoc());
+
ImplicitConversionSequence ICS =
- TryCopyInitialization(*this, &Call, ToType,
- /*SuppressUserConversions=*/true,
- /*InOverloadResolution=*/false,
- /*AllowObjCWritebackConversion=*/false);
+ TryCopyInitialization(*this, TheTemporaryCall, ToType,
+ /*SuppressUserConversions=*/true,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/false);
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
@@ -8928,16 +8977,20 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
// set.
for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
+
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
if (ExplicitTemplateArgs)
continue;
- AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
- PartialOverloading);
- } else
- AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
- FoundDecl, ExplicitTemplateArgs,
- Args, CandidateSet, PartialOverloading);
+ AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet,
+ /*SuppressUserConversions=*/false, PartialOverloading,
+ /*AllowExplicit=*/false, ADLCallKind::UsesADL);
+ } else {
+ AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I), FoundDecl,
+ ExplicitTemplateArgs, Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ PartialOverloading, ADLCallKind::UsesADL);
+ }
}
}
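A minimal sketch (hypothetical, not from the change) of the call shape this covers: candidates found only through argument-dependent lookup are now created with ADLCallKind::UsesADL, so later call building can tell them apart from ordinary lookup results:

    namespace N {
      struct S {};
      void g(S);
    }
    void h() {
      N::S s;
      g(s);   // 'g' is found via ADL; its candidate is marked UsesADL
    }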
@@ -8967,31 +9020,31 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
return Cand1Attr ? Comparison::Better : Comparison::Worse;
}
- // FIXME: The next several lines are just
- // specific_attr_iterator<EnableIfAttr> but going in declaration order,
- // instead of reverse order which is how they're stored in the AST.
- auto Cand1Attrs = getOrderedEnableIfAttrs(Cand1);
- auto Cand2Attrs = getOrderedEnableIfAttrs(Cand2);
-
- // It's impossible for Cand1 to be better than (or equal to) Cand2 if Cand1
- // has fewer enable_if attributes than Cand2.
- if (Cand1Attrs.size() < Cand2Attrs.size())
- return Comparison::Worse;
+ auto Cand1Attrs = Cand1->specific_attrs<EnableIfAttr>();
+ auto Cand2Attrs = Cand2->specific_attrs<EnableIfAttr>();
- auto Cand1I = Cand1Attrs.begin();
llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- for (auto &Cand2A : Cand2Attrs) {
+ for (auto Pair : zip_longest(Cand1Attrs, Cand2Attrs)) {
+ Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
+
+ // It's impossible for Cand1 to be better than (or equal to) Cand2 if Cand1
+ // has fewer enable_if attributes than Cand2, and vice versa.
+ if (!Cand1A)
+ return Comparison::Worse;
+ if (!Cand2A)
+ return Comparison::Better;
+
Cand1ID.clear();
Cand2ID.clear();
- auto &Cand1A = *Cand1I++;
- Cand1A->getCond()->Profile(Cand1ID, S.getASTContext(), true);
- Cand2A->getCond()->Profile(Cand2ID, S.getASTContext(), true);
+ (*Cand1A)->getCond()->Profile(Cand1ID, S.getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, S.getASTContext(), true);
if (Cand1ID != Cand2ID)
return Comparison::Worse;
}
- return Cand1I == Cand1Attrs.end() ? Comparison::Equal : Comparison::Better;
+ return Comparison::Equal;
}
static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
@@ -9000,6 +9053,11 @@ static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
!Cand2.Function->isMultiVersion())
return false;
+ // If Cand1 is invalid, it cannot be a better match, if Cand2 is invalid, this
+ // is obviously better.
+ if (Cand1.Function->isInvalidDecl()) return false;
+ if (Cand2.Function->isInvalidDecl()) return true;
+
// If this is a cpu_dispatch/cpu_specific multiversion situation, prefer
// cpu_dispatch, else arbitrarily based on the identifiers.
bool Cand1CPUDisp = Cand1.Function->hasAttr<CPUDispatchAttr>();
@@ -9506,7 +9564,7 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
if (!isFunctionAlwaysEnabled(S.Context, FD)) {
if (Complain) {
if (InOverloadResolution)
- S.Diag(FD->getLocStart(),
+ S.Diag(FD->getBeginLoc(),
diag::note_addrof_ovl_candidate_disabled_by_enable_if_attr);
else
S.Diag(Loc, diag::err_addrof_function_disabled_by_enable_if_attr) << FD;
@@ -10010,7 +10068,7 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
DeductionFailure.getFirstArg()->getNonTypeTemplateArgumentType();
QualType T2 =
DeductionFailure.getSecondArg()->getNonTypeTemplateArgumentType();
- if (!S.Context.hasSameType(T1, T2)) {
+ if (!T1.isNull() && !T2.isNull() && !S.Context.hasSameType(T1, T2)) {
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_inconsistent_deduction_types)
<< ParamD->getDeclName() << *DeductionFailure.getFirstArg() << T1
@@ -10268,7 +10326,8 @@ static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
S.Diag(Callee->getLocation(),
- diag::note_ovl_candidate_disabled_by_extension);
+ diag::note_ovl_candidate_disabled_by_extension)
+ << S.getOpenCLExtensionsFromDeclExtMap(Callee);
}
/// Generates a 'note' diagnostic for an overload candidate. We've
@@ -10836,8 +10895,7 @@ void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
// in general, want to list every possible builtin candidate.
}
- llvm::sort(Cands.begin(), Cands.end(),
- CompareTemplateSpecCandidatesForDisplay(S));
+ llvm::sort(Cands, CompareTemplateSpecCandidatesForDisplay(S));
// FIXME: Perhaps rename OverloadsShown and getShowOverloads()
// for generalization purposes (?).
@@ -11023,7 +11081,7 @@ private:
// Note: We explicitly leave Matches unmodified if there isn't a clear best
// option, so we can potentially give the user a better error
- if (!std::all_of(Matches.begin(), Matches.end(), IsBestOrInferiorToBest))
+ if (!llvm::all_of(Matches, IsBestOrInferiorToBest))
return false;
Matches[0] = *Best;
Matches.resize(1);
@@ -11114,7 +11172,7 @@ private:
// If any candidate has a placeholder return type, trigger its deduction
// now.
- if (completeFunctionType(S, FunDecl, SourceExpr->getLocStart(),
+ if (completeFunctionType(S, FunDecl, SourceExpr->getBeginLoc(),
Complain)) {
HasComplained |= Complain;
return false;
@@ -11190,7 +11248,7 @@ private:
// here, since the no_viable diagnostic has index 0.
UnresolvedSetIterator Result = S.getMostSpecialized(
MatchesCopy.begin(), MatchesCopy.end(), FailedCandidates,
- SourceExpr->getLocStart(), S.PDiag(),
+ SourceExpr->getBeginLoc(), S.PDiag(),
S.PDiag(diag::err_addr_ovl_ambiguous)
<< Matches[0].second->getDeclName(),
S.PDiag(diag::note_ovl_candidate)
@@ -11226,7 +11284,7 @@ private:
public:
void ComplainNoMatchesFound() const {
assert(Matches.empty());
- S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
+ S.Diag(OvlExpr->getBeginLoc(), diag::err_addr_ovl_no_viable)
<< OvlExpr->getName() << TargetFunctionType
<< OvlExpr->getSourceRange();
if (FailedCandidates.empty())
@@ -11244,7 +11302,7 @@ public:
if (!functionHasPassObjectSizeParams(Fun))
S.NoteOverloadCandidate(*I, Fun, TargetFunctionType,
/*TakingAddress=*/true);
- FailedCandidates.NoteCandidates(S, OvlExpr->getLocStart());
+ FailedCandidates.NoteCandidates(S, OvlExpr->getBeginLoc());
}
}
@@ -11266,21 +11324,20 @@ public:
}
void ComplainIsStaticMemberFunctionFromBoundPointer() const {
- S.Diag(OvlExpr->getLocStart(),
+ S.Diag(OvlExpr->getBeginLoc(),
diag::err_invalid_form_pointer_member_function)
- << OvlExpr->getSourceRange();
+ << OvlExpr->getSourceRange();
}
void ComplainOfInvalidConversion() const {
- S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
- << OvlExpr->getName() << TargetType;
+ S.Diag(OvlExpr->getBeginLoc(), diag::err_addr_ovl_not_func_ptrref)
+ << OvlExpr->getName() << TargetType;
}
void ComplainMultipleMatchesFound() const {
assert(Matches.size() > 1);
- S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
- << OvlExpr->getName()
- << OvlExpr->getSourceRange();
+ S.Diag(OvlExpr->getBeginLoc(), diag::err_addr_ovl_ambiguous)
+ << OvlExpr->getName() << OvlExpr->getSourceRange();
S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
/*TakingAddress=*/true);
}
@@ -11530,7 +11587,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult SingleFunctionExpression;
if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization(
ovl.Expression, /*complain*/ false, &found)) {
- if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) {
+ if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getBeginLoc())) {
SrcExpr = ExprError();
return true;
}
@@ -11966,14 +12023,14 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
OverloadCandidateSet::iterator Best;
if (CandidateSet->empty() ||
- CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best) ==
+ CandidateSet->BestViableFunction(*this, Fn->getBeginLoc(), Best) ==
OR_No_Viable_Function) {
- // In Microsoft mode, if we are inside a template class member function then
- // create a type dependent CallExpr. The goal is to postpone name lookup
- // to instantiation time to be able to search into type dependent base
- // classes.
- CallExpr *CE = new (Context) CallExpr(
- Context, Fn, Args, Context.DependentTy, VK_RValue, RParenLoc);
+ // In Microsoft mode, if we are inside a template class member function
+ // then create a type dependent CallExpr. The goal is to postpone name
+ // lookup to instantiation time to be able to search into type dependent
+ // base classes.
+ CallExpr *CE = CallExpr::Create(Context, Fn, Args, Context.DependentTy,
+ VK_RValue, RParenLoc);
CE->setTypeDependent(true);
CE->setValueDependent(true);
CE->setInstantiationDependent(true);
@@ -12015,7 +12072,8 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
return ExprError();
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig);
+ ExecConfig, /*IsExecConfig=*/false,
+ (*Best)->IsADLCandidate);
}
case OR_No_Viable_Function: {
@@ -12043,24 +12101,23 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
}
}
- SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call)
+ SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_no_viable_function_in_call)
<< ULE->getName() << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
break;
}
case OR_Ambiguous:
- SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
- << ULE->getName() << Fn->getSourceRange();
+ SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_ambiguous_call)
+ << ULE->getName() << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, Args);
break;
case OR_Deleted: {
- SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
- << (*Best)->Function->isDeleted()
- << ULE->getName()
- << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
- << Fn->getSourceRange();
+ SemaRef.Diag(Fn->getBeginLoc(), diag::err_ovl_deleted_call)
+ << (*Best)->Function->isDeleted() << ULE->getName()
+ << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
+ << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
// We emitted an error for the unavailable/deleted function call but keep
@@ -12068,7 +12125,8 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
FunctionDecl *FDecl = (*Best)->Function;
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig);
+ ExecConfig, /*IsExecConfig=*/false,
+ (*Best)->IsADLCandidate);
}
}
@@ -12116,7 +12174,7 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
OverloadCandidateSet::iterator Best;
OverloadingResult OverloadResult =
- CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best);
+ CandidateSet.BestViableFunction(*this, Fn->getBeginLoc(), Best);
return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args,
RParenLoc, ExecConfig, &CandidateSet,
@@ -12178,14 +12236,12 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
VK_RValue, OK_Ordinary, OpLoc, false);
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
- UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, NamingClass,
- NestedNameSpecifierLoc(), OpNameInfo,
- /*ADL*/ true, IsOverloaded(Fns),
- Fns.begin(), Fns.end());
- return new (Context)
- CXXOperatorCallExpr(Context, Op, Fn, ArgsArray, Context.DependentTy,
- VK_RValue, OpLoc, FPOptions());
+ UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
+ Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end());
+ return CXXOperatorCallExpr::Create(Context, Op, Fn, ArgsArray,
+ Context.DependentTy, VK_RValue, OpLoc,
+ FPOptions());
}
// Build an empty overload set.
@@ -12257,9 +12313,9 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
ResultTy = ResultTy.getNonLValueExprType(Context);
Args[0] = Input;
- CallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray,
- ResultTy, VK, OpLoc, FPOptions());
+ CallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, Op, FnExpr.get(), ArgsArray, ResultTy, VK, OpLoc,
+ FPOptions(), Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
return ExprError();
@@ -12369,14 +12425,12 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
// TODO: provide better source location info in DNLoc component.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
- UnresolvedLookupExpr *Fn
- = UnresolvedLookupExpr::Create(Context, NamingClass,
- NestedNameSpecifierLoc(), OpNameInfo,
- /*ADL*/PerformADL, IsOverloaded(Fns),
- Fns.begin(), Fns.end());
- return new (Context)
- CXXOperatorCallExpr(Context, Op, Fn, Args, Context.DependentTy,
- VK_RValue, OpLoc, FPFeatures);
+ UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(
+ Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ PerformADL, IsOverloaded(Fns), Fns.begin(), Fns.end());
+ return CXXOperatorCallExpr::Create(Context, Op, Fn, Args,
+ Context.DependentTy, VK_RValue, OpLoc,
+ FPFeatures);
}
// Always do placeholder-like conversions on the RHS.
@@ -12489,10 +12543,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(),
- Args, ResultTy, VK, OpLoc,
- FPFeatures);
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, Op, FnExpr.get(), Args, ResultTy, VK, OpLoc, FPFeatures,
+ Best->IsADLCandidate);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
FnDecl))
@@ -12638,9 +12691,9 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
UnresolvedSetIterator());
// Can't add any actual overloads yet
- return new (Context)
- CXXOperatorCallExpr(Context, OO_Subscript, Fn, Args,
- Context.DependentTy, VK_RValue, RLoc, FPOptions());
+ return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn, Args,
+ Context.DependentTy, VK_RValue, RLoc,
+ FPOptions());
}
// Handle placeholders on both operands.
@@ -12714,10 +12767,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
- FnExpr.get(), Args,
- ResultTy, VK, RLoc,
- FPOptions());
+ CXXOperatorCallExpr::Create(Context, OO_Subscript, FnExpr.get(),
+ Args, ResultTy, VK, RLoc, FPOptions());
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
@@ -12819,7 +12870,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// Check that the object type isn't more qualified than the
// member function we're calling.
- Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals());
+ Qualifiers funcQuals = proto->getTypeQuals();
QualType objectType = op->getLHS()->getType();
if (op->getOpcode() == BO_PtrMemI)
@@ -12837,11 +12888,11 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
<< (qualsString.find(' ') == std::string::npos ? 1 : 2);
}
- CXXMemberCallExpr *call
- = new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
- resultType, valueKind, RParenLoc);
+ CXXMemberCallExpr *call =
+ CXXMemberCallExpr::Create(Context, MemExprE, Args, resultType,
+ valueKind, RParenLoc, proto->getNumParams());
- if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getLocStart(),
+ if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getBeginLoc(),
call, nullptr))
return ExprError();
@@ -12855,8 +12906,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
if (isa<CXXPseudoDestructorExpr>(NakedMemExpr))
- return new (Context)
- CallExpr(Context, MemExprE, Args, Context.VoidTy, VK_RValue, RParenLoc);
+ return CallExpr::Create(Context, MemExprE, Args, Context.VoidTy, VK_RValue,
+ RParenLoc);
UnbridgedCastsSet UnbridgedCasts;
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
@@ -12927,7 +12978,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
UnbridgedCasts.restore();
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
+ switch (CandidateSet.BestViableFunction(*this, UnresExpr->getBeginLoc(),
Best)) {
case OR_Success:
Method = cast<CXXMethodDecl>(Best->Function);
@@ -12989,9 +13040,10 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
ResultType = ResultType.getNonLValueExprType(Context);
assert(Method && "Member call to something that isn't a method?");
+ const auto *Proto = Method->getType()->getAs<FunctionProtoType>();
CXXMemberCallExpr *TheCall =
- new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
- ResultType, VK, RParenLoc);
+ CXXMemberCallExpr::Create(Context, MemExprE, Args, ResultType, VK,
+ RParenLoc, Proto->getNumParams());
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
@@ -13011,8 +13063,6 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
// Convert the rest of the arguments
- const FunctionProtoType *Proto =
- Method->getType()->getAs<FunctionProtoType>();
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
RParenLoc))
return ExprError();
@@ -13044,17 +13094,15 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()) &&
MemExpr->performsVirtualDispatch(getLangOpts())) {
- Diag(MemExpr->getLocStart(),
+ Diag(MemExpr->getBeginLoc(),
diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
- << MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
- << MD->getParent()->getDeclName();
+ << MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
+ << MD->getParent()->getDeclName();
- Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
+ Diag(MD->getBeginLoc(), diag::note_previous_decl) << MD->getDeclName();
if (getLangOpts().AppleKext)
- Diag(MemExpr->getLocStart(),
- diag::note_pure_qualified_call_kext)
- << MD->getParent()->getDeclName()
- << MD->getDeclName();
+ Diag(MemExpr->getBeginLoc(), diag::note_pure_qualified_call_kext)
+ << MD->getParent()->getDeclName() << MD->getDeclName();
}
}
@@ -13062,7 +13110,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
dyn_cast<CXXDestructorDecl>(TheCall->getMethodDecl())) {
// a->A::f() doesn't go through the vtable, except in AppleKext mode.
bool CallCanBeVirtual = !MemExpr->hasQualifier() || getLangOpts().AppleKext;
- CheckVirtualDtorCall(DD, MemExpr->getLocStart(), /*IsDelete=*/false,
+ CheckVirtualDtorCall(DD, MemExpr->getBeginLoc(), /*IsDelete=*/false,
CallCanBeVirtual, /*WarnOnNonAbstractTypes=*/true,
MemExpr->getMemberLoc());
}
@@ -13167,7 +13215,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(*this, Object.get()->getLocStart(),
+ switch (CandidateSet.BestViableFunction(*this, Object.get()->getBeginLoc(),
Best)) {
case OR_Success:
// Overload resolution succeeded; we'll build the appropriate call
@@ -13176,30 +13224,26 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
case OR_No_Viable_Function:
if (CandidateSet.empty())
- Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper)
- << Object.get()->getType() << /*call*/ 1
- << Object.get()->getSourceRange();
+ Diag(Object.get()->getBeginLoc(), diag::err_ovl_no_oper)
+ << Object.get()->getType() << /*call*/ 1
+ << Object.get()->getSourceRange();
else
- Diag(Object.get()->getLocStart(),
- diag::err_ovl_no_viable_object_call)
- << Object.get()->getType() << Object.get()->getSourceRange();
+ Diag(Object.get()->getBeginLoc(), diag::err_ovl_no_viable_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
break;
case OR_Ambiguous:
- Diag(Object.get()->getLocStart(),
- diag::err_ovl_ambiguous_object_call)
- << Object.get()->getType() << Object.get()->getSourceRange();
+ Diag(Object.get()->getBeginLoc(), diag::err_ovl_ambiguous_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
break;
case OR_Deleted:
- Diag(Object.get()->getLocStart(),
- diag::err_ovl_deleted_object_call)
- << Best->Function->isDeleted()
- << Object.get()->getType()
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Object.get()->getSourceRange();
+ Diag(Object.get()->getBeginLoc(), diag::err_ovl_deleted_object_call)
+ << Best->Function->isDeleted() << Object.get()->getType()
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
break;
}
@@ -13266,29 +13310,14 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (NewFn.isInvalid())
return true;
+ // The number of argument slots to allocate in the call. If we have default
+ // arguments we need to allocate space for them as well. We additionally
+ // need one more slot for the object parameter.
+ unsigned NumArgsSlots = 1 + std::max<unsigned>(Args.size(), NumParams);
+
// Build the full argument list for the method call (the implicit object
// parameter is placed at the beginning of the list).
- SmallVector<Expr *, 8> MethodArgs(Args.size() + 1);
- MethodArgs[0] = Object.get();
- std::copy(Args.begin(), Args.end(), MethodArgs.begin() + 1);
-
- // Once we've built TheCall, all of the expressions are properly
- // owned.
- QualType ResultTy = Method->getReturnType();
- ExprValueKind VK = Expr::getValueKindForType(ResultTy);
- ResultTy = ResultTy.getNonLValueExprType(Context);
-
- CXXOperatorCallExpr *TheCall = new (Context)
- CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), MethodArgs, ResultTy,
- VK, RParenLoc, FPOptions());
-
- if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
- return true;
-
- // We may have default arguments. If so, we need to allocate more
- // slots in the call for them.
- if (Args.size() < NumParams)
- TheCall->setNumArgs(Context, NumParams + 1);
+ SmallVector<Expr *, 8> MethodArgs(NumArgsSlots);
bool IsError = false;
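
The reworked object-call path above sizes MethodArgs once, covering the implicit object argument, the explicit arguments, and any defaulted parameters, instead of building the call first and growing it with setNumArgs(). A toy illustration of the slot arithmetic, with made-up numbers:

    // Hypothetical example to make the NumArgsSlots formula concrete:
    //   struct F { int operator()(int a, int b = 1, int c = 2) const; };
    //   F f; f(5);   // Args.size() == 1, NumParams == 3
    unsigned numArgSlots(unsigned NumArgs, unsigned NumParams) {
      // One slot for the implicit object argument, plus room for whichever is
      // larger: the explicit arguments or the full parameter list.
      return 1 + (NumArgs > NumParams ? NumArgs : NumParams); // 1 + 3 == 4
    }
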
@@ -13300,7 +13329,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
IsError = true;
else
Object = ObjRes;
- TheCall->setArg(0, Object.get());
+ MethodArgs[0] = Object.get();
// Check the argument types.
for (unsigned i = 0; i != NumParams; i++) {
@@ -13329,7 +13358,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
Arg = DefArg.getAs<Expr>();
}
- TheCall->setArg(i + 1, Arg);
+ MethodArgs[i + 1] = Arg;
}
// If this is a variadic call, handle args passed through "...".
@@ -13339,14 +13368,27 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
IsError |= Arg.isInvalid();
- TheCall->setArg(i + 1, Arg.get());
+ MethodArgs[i + 1] = Arg.get();
}
}
- if (IsError) return true;
+ if (IsError)
+ return true;
DiagnoseSentinelCalls(Method, LParenLoc, Args);
+ // Once we've built TheCall, all of the expressions are properly owned.
+ QualType ResultTy = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall =
+ CXXOperatorCallExpr::Create(Context, OO_Call, NewFn.get(), MethodArgs,
+ ResultTy, VK, RParenLoc, FPOptions());
+
+ if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
+ return true;
+
if (CheckFunctionCall(Method, TheCall, Proto))
return true;
@@ -13458,9 +13500,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
- new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.get(),
- Base, ResultTy, VK, OpLoc, FPOptions());
+ CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Arrow, FnExpr.get(), Base, ResultTy, VK, OpLoc, FPOptions());
if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
return ExprError();
@@ -13532,10 +13573,9 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- UserDefinedLiteral *UDL =
- new (Context) UserDefinedLiteral(Context, Fn.get(),
- llvm::makeArrayRef(ConvArgs, Args.size()),
- ResultTy, VK, LitEndLoc, UDSuffixLoc);
+ UserDefinedLiteral *UDL = UserDefinedLiteral::Create(
+ Context, Fn.get(), llvm::makeArrayRef(ConvArgs, Args.size()), ResultTy,
+ VK, LitEndLoc, UDSuffixLoc);
if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
return ExprError();
@@ -13596,7 +13636,7 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
}
OverloadCandidateSet::iterator Best;
OverloadingResult OverloadResult =
- CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best);
+ CandidateSet->BestViableFunction(*this, Fn->getBeginLoc(), Best);
if (OverloadResult == OR_No_Viable_Function) {
*CallExpr = ExprError();
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index a8af75d87c8d..ebf1d10aa16a 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -977,7 +977,7 @@ ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) {
if (isWeakProperty() && !S.isUnevaluatedContext() &&
!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
- SyntacticForm->getLocStart()))
+ SyntacticForm->getBeginLoc()))
S.getCurFunction()->recordUseOfWeak(SyntacticRefExpr,
SyntacticRefExpr->isMessagingGetter());
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 377e2c4dfa23..9e30c9a396c0 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -42,12 +42,11 @@
using namespace clang;
using namespace sema;
-StmtResult Sema::ActOnExprStmt(ExprResult FE) {
+StmtResult Sema::ActOnExprStmt(ExprResult FE, bool DiscardedValue) {
if (FE.isInvalid())
return StmtError();
- FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(),
- /*DiscardedValue*/ true);
+ FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(), DiscardedValue);
if (FE.isInvalid())
return StmtError();
@@ -246,7 +245,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
unsigned DiagID = diag::warn_unused_expr;
- if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
+ if (const FullExpr *Temps = dyn_cast<FullExpr>(E))
E = Temps->getSubExpr();
if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
E = TempExpr->getSubExpr();
@@ -259,17 +258,16 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (E->getType()->isVoidType())
return;
+ if (const Attr *A = CE->getUnusedResultAttr(Context)) {
+ Diag(Loc, diag::warn_unused_result) << A << R1 << R2;
+ return;
+ }
+
// If the callee has attribute pure, const, or warn_unused_result, warn with
// a more specific message to make it clear what is happening. If the call
// is written in a macro body, only warn if it has the warn_unused_result
// attribute.
if (const Decl *FD = CE->getCalleeDecl()) {
- if (const Attr *A = isa<FunctionDecl>(FD)
- ? cast<FunctionDecl>(FD)->getUnusedResultAttr()
- : FD->getAttr<WarnUnusedResultAttr>()) {
- Diag(Loc, diag::warn_unused_result) << A << R1 << R2;
- return;
- }
if (ShouldSuppress)
return;
if (FD->hasAttr<PureAttr>()) {
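
The unused-result check is hoisted ahead of the callee-based pure/const handling and now queries the call expression itself via getUnusedResultAttr(Context); assuming that lookup also consults the declaration of the returned type, it covers [[nodiscard]] on types as well as on functions. A source-level illustration (hypothetical names, C++17) of the calls this warning path is about:

    // Both calls below should draw the warn_unused_result diagnostic emitted
    // by the code above; the cast to void suppresses it.
    struct [[nodiscard]] ErrorCode { int value; };

    ErrorCode open_file(const char *path); // nodiscard return type
    [[nodiscard]] int must_check();        // nodiscard function

    void demo() {
      open_file("/tmp/data");  // warning: ignoring return value
      must_check();            // warning: ignoring return value
      (void)must_check();      // explicitly discarded, no warning
    }
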
@@ -349,6 +347,10 @@ sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
return getCurFunction()->CompoundScopes.back();
}
+bool Sema::isCurCompoundStmtAStmtExpr() const {
+ return getCurCompoundScope().IsStmtExpr;
+}
+
StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
@@ -371,14 +373,6 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
Diag(D->getLocation(), diag::ext_mixed_decls_code);
}
}
- // Warn about unused expressions in statements.
- for (unsigned i = 0; i != NumElts; ++i) {
- // Ignore statements that are last in a statement expression.
- if (isStmtExpr && i == NumElts - 1)
- continue;
-
- DiagnoseUnusedExprResult(Elts[i]);
- }
// Check for suspicious empty body (null statement) in `for' and `while'
// statements. Don't do anything for template instantiations, this just adds
@@ -462,25 +456,20 @@ Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHSVal,
return StmtError();
}
- CaseStmt *CS = new (Context)
- CaseStmt(LHSVal.get(), RHSVal.get(), CaseLoc, DotDotDotLoc, ColonLoc);
+ auto *CS = CaseStmt::Create(Context, LHSVal.get(), RHSVal.get(),
+ CaseLoc, DotDotDotLoc, ColonLoc);
getCurFunction()->SwitchStack.back().getPointer()->addSwitchCase(CS);
return CS;
}
/// ActOnCaseStmtBody - This installs a statement as the body of a case.
-void Sema::ActOnCaseStmtBody(Stmt *caseStmt, Stmt *SubStmt) {
- DiagnoseUnusedExprResult(SubStmt);
-
- CaseStmt *CS = static_cast<CaseStmt*>(caseStmt);
- CS->setSubStmt(SubStmt);
+void Sema::ActOnCaseStmtBody(Stmt *S, Stmt *SubStmt) {
+ cast<CaseStmt>(S)->setSubStmt(SubStmt);
}
StmtResult
Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope) {
- DiagnoseUnusedExprResult(SubStmt);
-
if (getCurFunction()->SwitchStack.empty()) {
Diag(DefaultLoc, diag::err_default_not_in_switch);
return SubStmt;
@@ -551,12 +540,13 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt,
false);
Expr *CondExpr = Cond.get().second;
- if (!Diags.isIgnored(diag::warn_comma_operator,
- CondExpr->getExprLoc()))
+ // Only call the CommaVisitor when not C89 due to differences in scope flags.
+ if ((getLangOpts().C99 || getLangOpts().CPlusPlus) &&
+ !Diags.isIgnored(diag::warn_comma_operator, CondExpr->getExprLoc()))
CommaVisitor(*this).Visit(CondExpr);
if (!elseStmt)
- DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), thenStmt,
+ DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt,
diag::warn_empty_if_body);
return BuildIfStmt(IfLoc, IsConstexpr, InitStmt, Cond, thenStmt, ElseLoc,
@@ -573,12 +563,8 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
if (IsConstexpr || isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
setFunctionHasBranchProtectedScope();
- DiagnoseUnusedExprResult(thenStmt);
- DiagnoseUnusedExprResult(elseStmt);
-
- return new (Context)
- IfStmt(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
- Cond.get().second, thenStmt, ElseLoc, elseStmt);
+ return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
+ Cond.get().second, thenStmt, ElseLoc, elseStmt);
}
namespace {
@@ -631,8 +617,8 @@ static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
/// potentially integral-promoted expression @p expr.
static QualType GetTypeBeforeIntegralPromotion(const Expr *&E) {
- if (const auto *CleanUps = dyn_cast<ExprWithCleanups>(E))
- E = CleanUps->getSubExpr();
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
while (const auto *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
if (ImpCast->getCastKind() != CK_IntegralCast) break;
E = ImpCast->getSubExpr();
@@ -727,8 +713,7 @@ StmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
setFunctionHasBranchIntoScope();
- SwitchStmt *SS = new (Context)
- SwitchStmt(Context, InitStmt, Cond.get().first, CondExpr);
+ auto *SS = SwitchStmt::Create(Context, InitStmt, Cond.get().first, CondExpr);
getCurFunction()->SwitchStack.push_back(
FunctionScopeInfo::SwitchInfo(SS, false));
return SS;
@@ -918,8 +903,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Check the unconverted value is within the range of possible values of
// the switch expression.
- checkCaseValue(*this, Lo->getLocStart(), LoVal,
- CondWidthBeforePromotion, CondIsSignedBeforePromotion);
+ checkCaseValue(*this, Lo->getBeginLoc(), LoVal, CondWidthBeforePromotion,
+ CondIsSignedBeforePromotion);
// FIXME: This duplicates the check performed for warn_not_in_enum below.
checkEnumTypesInSwitchStmt(*this, CondExprBeforePromotion,
@@ -946,8 +931,11 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
llvm::APSInt ConstantCondValue;
bool HasConstantCond = false;
if (!HasDependentValue && !TheDefaultStmt) {
- HasConstantCond = CondExpr->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::EvalResult Result;
+ HasConstantCond = CondExpr->EvaluateAsInt(Result, Context,
Expr::SE_AllowSideEffects);
+ if (Result.Val.isInt())
+ ConstantCondValue = Result.Val.getInt();
assert(!HasConstantCond ||
(ConstantCondValue.getBitWidth() == CondWidth &&
ConstantCondValue.isSigned() == CondIsSigned));
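
This is the updated Expr::EvaluateAsInt interface, which reports its value through an Expr::EvalResult instead of writing straight into an llvm::APSInt; the asm immediate-constraint hunk near the end of this patch follows the same shape via EvaluateAsRValue. A small sketch of the calling convention (the helper name is made up; assumes the signatures used in the hunk above):

    #include "clang/AST/Expr.h"
    #include "llvm/ADT/APSInt.h"

    static bool tryGetCondValue(const clang::Expr *CondExpr,
                                const clang::ASTContext &Ctx,
                                llvm::APSInt &Out) {
      clang::Expr::EvalResult Result;
      if (!CondExpr->EvaluateAsInt(Result, Ctx, clang::Expr::SE_AllowSideEffects))
        return false;
      // The integer now lives inside the EvalResult's APValue.
      Out = Result.Val.getInt();
      return true;
    }
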
@@ -979,17 +967,17 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
CaseVals[i-1].first.toString(CaseValStr);
if (PrevString == CurrString)
- Diag(CaseVals[i].second->getLHS()->getLocStart(),
- diag::err_duplicate_case) <<
- (PrevString.empty() ? StringRef(CaseValStr) : PrevString);
+ Diag(CaseVals[i].second->getLHS()->getBeginLoc(),
+ diag::err_duplicate_case)
+ << (PrevString.empty() ? StringRef(CaseValStr) : PrevString);
else
- Diag(CaseVals[i].second->getLHS()->getLocStart(),
- diag::err_duplicate_case_differing_expr) <<
- (PrevString.empty() ? StringRef(CaseValStr) : PrevString) <<
- (CurrString.empty() ? StringRef(CaseValStr) : CurrString) <<
- CaseValStr;
+ Diag(CaseVals[i].second->getLHS()->getBeginLoc(),
+ diag::err_duplicate_case_differing_expr)
+ << (PrevString.empty() ? StringRef(CaseValStr) : PrevString)
+ << (CurrString.empty() ? StringRef(CaseValStr) : CurrString)
+ << CaseValStr;
- Diag(CaseVals[i-1].second->getLHS()->getLocStart(),
+ Diag(CaseVals[i - 1].second->getLHS()->getBeginLoc(),
diag::note_duplicate_case_prev);
// FIXME: We really want to remove the bogus case stmt from the
// substmt, but we have no way to do this right now.
@@ -1018,7 +1006,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// Check the unconverted value is within the range of possible values of
// the switch expression.
- checkCaseValue(*this, Hi->getLocStart(), HiVal,
+ checkCaseValue(*this, Hi->getBeginLoc(), HiVal,
CondWidthBeforePromotion, CondIsSignedBeforePromotion);
// Convert the value to the same width/sign as the condition.
@@ -1026,9 +1014,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// If the low value is bigger than the high value, the case is empty.
if (LoVal > HiVal) {
- Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
- << SourceRange(CR->getLHS()->getLocStart(),
- Hi->getLocEnd());
+ Diag(CR->getLHS()->getBeginLoc(), diag::warn_case_empty_range)
+ << SourceRange(CR->getLHS()->getBeginLoc(), Hi->getEndLoc());
CaseRanges.erase(CaseRanges.begin()+i);
--i;
--e;
@@ -1082,9 +1069,9 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (OverlapStmt) {
// If we have a duplicate, report it.
- Diag(CR->getLHS()->getLocStart(), diag::err_duplicate_case)
- << OverlapVal.toString(10);
- Diag(OverlapStmt->getLHS()->getLocStart(),
+ Diag(CR->getLHS()->getBeginLoc(), diag::err_duplicate_case)
+ << OverlapVal.toString(10);
+ Diag(OverlapStmt->getLHS()->getBeginLoc(),
diag::note_duplicate_case_prev);
// FIXME: We really want to remove the bogus case stmt from the
// substmt, but we have no way to do this right now.
@@ -1165,7 +1152,21 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
SmallVector<DeclarationName,8> UnhandledNames;
- for (EI = EnumVals.begin(); EI != EIEnd; EI++){
+ for (EI = EnumVals.begin(); EI != EIEnd; EI++) {
+ // Don't warn about omitted unavailable EnumConstantDecls.
+ switch (EI->second->getAvailability()) {
+ case AR_Deprecated:
+ // Omitting a deprecated constant is ok; it should never materialize.
+ case AR_Unavailable:
+ continue;
+
+ case AR_NotYetIntroduced:
+ // Partially available enum constants should be present. Note that we
+ // suppress -Wunguarded-availability diagnostics for such uses.
+ case AR_Available:
+ break;
+ }
+
// Drop unneeded case values
while (CI != CaseVals.end() && CI->first < EI->first)
CI++;
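
The availability switch above exempts unavailable and deprecated enumerators from the covered-switch check, so a switch is no longer asked to handle enumerators the program can never sensibly use. An illustrative (hypothetical) example:

    // Omitting 'Legacy' below should no longer draw the -Wswitch
    // "enumeration value not handled" warning, since the enumerator is
    // marked unavailable.
    enum Mode {
      Fast,
      Slow,
      Legacy __attribute__((unavailable("removed"))),
    };

    int cost(Mode M) {
      switch (M) {
      case Fast: return 1;
      case Slow: return 5;
      }
      return 0;
    }
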
@@ -1209,7 +1210,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
}
if (BodyStmt)
- DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), BodyStmt,
+ DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), BodyStmt,
diag::warn_empty_switch_body);
// FIXME: If the case list was broken is some way, we don't have a good system
@@ -1289,13 +1290,11 @@ StmtResult Sema::ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
!Diags.isIgnored(diag::warn_comma_operator, CondVal.second->getExprLoc()))
CommaVisitor(*this).Visit(CondVal.second);
- DiagnoseUnusedExprResult(Body);
-
if (isa<NullStmt>(Body))
getCurCompoundScope().setHasEmptyLoopBodies();
- return new (Context)
- WhileStmt(Context, CondVal.first, CondVal.second, Body, WhileLoc);
+ return WhileStmt::Create(Context, CondVal.first, CondVal.second, Body,
+ WhileLoc);
}
StmtResult
@@ -1310,12 +1309,15 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
return StmtError();
Cond = CondResult.get();
- CondResult = ActOnFinishFullExpr(Cond, DoLoc);
+ CondResult = ActOnFinishFullExpr(Cond, DoLoc, /*DiscardedValue*/ false);
if (CondResult.isInvalid())
return StmtError();
Cond = CondResult.get();
- DiagnoseUnusedExprResult(Body);
+ // Only call the CommaVisitor for C89 due to differences in scope flags.
+ if (Cond && !getLangOpts().C99 && !getLangOpts().CPlusPlus &&
+ !Diags.isIgnored(diag::warn_comma_operator, Cond->getExprLoc()))
+ CommaVisitor(*this).Visit(Cond);
return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen);
}
@@ -1396,7 +1398,11 @@ namespace {
void VisitDeclRefExpr(DeclRefExpr *E) {
VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
- if (!VD) return;
+ if (!VD) {
+ // Don't allow unhandled Decl types.
+ Simple = false;
+ return;
+ }
Ranges.push_back(E->getSourceRange());
@@ -1492,7 +1498,7 @@ namespace {
if (!Second) return;
if (S.Diags.isIgnored(diag::warn_variables_not_in_loop_body,
- Second->getLocStart()))
+ Second->getBeginLoc()))
return;
PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body);
@@ -1634,6 +1640,8 @@ namespace {
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
// Only visit the initialization of a for loop; the body
// has a different break/continue scope.
+ if (const Stmt *Init = S->getInit())
+ Visit(Init);
if (const Stmt *Range = S->getRangeStmt())
Visit(Range);
if (const Stmt *Begin = S->getBeginStmt())
@@ -1668,7 +1676,7 @@ namespace {
if (!Body || !Third) return;
if (S.Diags.isIgnored(diag::warn_redundant_loop_iteration,
- Third->getLocStart()))
+ Third->getBeginLoc()))
return;
// Get the last statement from the loop body.
@@ -1755,11 +1763,6 @@ StmtResult Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
CommaVisitor(*this).Visit(Second.get().second);
Expr *Third = third.release().getAs<Expr>();
-
- DiagnoseUnusedExprResult(First);
- DiagnoseUnusedExprResult(Third);
- DiagnoseUnusedExprResult(Body);
-
if (isa<NullStmt>(Body))
getCurCompoundScope().setHasEmptyLoopBodies();
@@ -1779,7 +1782,7 @@ StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
if (result.isInvalid()) return StmtError();
E = result.get();
- ExprResult FullExpr = ActOnFinishFullExpr(E);
+ ExprResult FullExpr = ActOnFinishFullExpr(E, /*DiscardedValue*/ false);
if (FullExpr.isInvalid())
return StmtError();
return StmtResult(static_cast<Stmt*>(FullExpr.get()));
@@ -1914,9 +1917,9 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
} else {
Expr *FirstE = cast<Expr>(First);
if (!FirstE->isTypeDependent() && !FirstE->isLValue())
- return StmtError(Diag(First->getLocStart(),
- diag::err_selector_element_not_lvalue)
- << First->getSourceRange());
+ return StmtError(
+ Diag(First->getBeginLoc(), diag::err_selector_element_not_lvalue)
+ << First->getSourceRange());
FirstType = static_cast<Expr*>(First)->getType();
if (FirstType.isConstQualified())
@@ -1933,7 +1936,8 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
if (CollectionExprResult.isInvalid())
return StmtError();
- CollectionExprResult = ActOnFinishFullExpr(CollectionExprResult.get());
+ CollectionExprResult =
+ ActOnFinishFullExpr(CollectionExprResult.get(), /*DiscardedValue*/ false);
if (CollectionExprResult.isInvalid())
return StmtError();
@@ -2052,21 +2056,26 @@ static bool ObjCEnumerationCollection(Expr *Collection) {
/// The body of the loop is not available yet, since it cannot be analysed until
/// we have determined the type of the for-range-declaration.
StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
- SourceLocation CoawaitLoc, Stmt *First,
- SourceLocation ColonLoc, Expr *Range,
- SourceLocation RParenLoc,
+ SourceLocation CoawaitLoc, Stmt *InitStmt,
+ Stmt *First, SourceLocation ColonLoc,
+ Expr *Range, SourceLocation RParenLoc,
BuildForRangeKind Kind) {
if (!First)
return StmtError();
- if (Range && ObjCEnumerationCollection(Range))
+ if (Range && ObjCEnumerationCollection(Range)) {
+ // FIXME: Support init-statements in Objective-C++20 ranged for statement.
+ if (InitStmt)
+ return Diag(InitStmt->getBeginLoc(), diag::err_objc_for_range_init_stmt)
+ << InitStmt->getSourceRange();
return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);
+ }
DeclStmt *DS = dyn_cast<DeclStmt>(First);
assert(DS && "first part of for range not a decl stmt");
if (!DS->isSingleDecl()) {
- Diag(DS->getStartLoc(), diag::err_type_defined_in_for_range);
+ Diag(DS->getBeginLoc(), diag::err_type_defined_in_for_range);
return StmtError();
}
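
ActOnCXXForRangeStmt now threads an InitStmt through, i.e. the C++20 range-based for with an init-statement, and the Objective-C fast-enumeration form rejects it for now (the FIXME above). A source-level example of what the new parameter corresponds to (hypothetical, compile as C++20):

    #include <vector>

    std::vector<int> makeWidgets() { return {1, 2, 3}; }

    int sumWidgets() {
      int total = 0;
      // "auto v = makeWidgets();" is the init-statement carried by InitStmt.
      for (auto v = makeWidgets(); int x : v)
        total += x;
      return total;
    }
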
@@ -2087,7 +2096,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
// Build auto && __range = range-init
// Divide by 2, since the variables are in the inner scope (loop body).
const auto DepthStr = std::to_string(S->getDepth() / 2);
- SourceLocation RangeLoc = Range->getLocStart();
+ SourceLocation RangeLoc = Range->getBeginLoc();
VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
Context.getAutoRRefDeductType(),
std::string("__range") + DepthStr);
@@ -2106,10 +2115,10 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
return StmtError();
}
- return BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc, RangeDecl.get(),
- /*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
- /*Cond=*/nullptr, /*Inc=*/nullptr,
- DS, RParenLoc, Kind);
+ return BuildCXXForRangeStmt(
+ ForLoc, CoawaitLoc, InitStmt, ColonLoc, RangeDecl.get(),
+ /*BeginStmt=*/nullptr, /*EndStmt=*/nullptr,
+ /*Cond=*/nullptr, /*Inc=*/nullptr, DS, RParenLoc, Kind);
}
/// Create the initialization, compare, and increment steps for
@@ -2136,6 +2145,56 @@ BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
Sema::LookupMemberName);
LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName);
+ auto BuildBegin = [&] {
+ *BEF = BEF_begin;
+ Sema::ForRangeStatus RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo,
+ BeginMemberLookup, CandidateSet,
+ BeginRange, BeginExpr);
+
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(BeginRange->getBeginLoc(), diag::note_in_for_range)
+ << ColonLoc << BEF_begin << BeginRange->getType();
+ return RangeStatus;
+ }
+ if (!CoawaitLoc.isInvalid()) {
+ // FIXME: getCurScope() should not be used during template instantiation.
+ // We should pick up the set of unqualified lookup results for operator
+ // co_await during the initial parse.
+ *BeginExpr = SemaRef.ActOnCoawaitExpr(SemaRef.getCurScope(), ColonLoc,
+ BeginExpr->get());
+ if (BeginExpr->isInvalid())
+ return Sema::FRS_DiagnosticIssued;
+ }
+ if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+ return Sema::FRS_Success;
+ };
+
+ auto BuildEnd = [&] {
+ *BEF = BEF_end;
+ Sema::ForRangeStatus RangeStatus =
+ SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo,
+ EndMemberLookup, CandidateSet,
+ EndRange, EndExpr);
+ if (RangeStatus != Sema::FRS_Success) {
+ if (RangeStatus == Sema::FRS_DiagnosticIssued)
+ SemaRef.Diag(EndRange->getBeginLoc(), diag::note_in_for_range)
+ << ColonLoc << BEF_end << EndRange->getType();
+ return RangeStatus;
+ }
+ if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
+ return Sema::FRS_DiagnosticIssued;
+ }
+ return Sema::FRS_Success;
+ };
+
if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
// - if _RangeT is a class type, the unqualified-ids begin and end are
// looked up in the scope of class _RangeT as if by class member access
@@ -2143,68 +2202,62 @@ BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
// declaration, begin-expr and end-expr are __range.begin() and
// __range.end(), respectively;
SemaRef.LookupQualifiedName(BeginMemberLookup, D);
+ if (BeginMemberLookup.isAmbiguous())
+ return Sema::FRS_DiagnosticIssued;
+
SemaRef.LookupQualifiedName(EndMemberLookup, D);
+ if (EndMemberLookup.isAmbiguous())
+ return Sema::FRS_DiagnosticIssued;
if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
- SourceLocation RangeLoc = BeginVar->getLocation();
- *BEF = BeginMemberLookup.empty() ? BEF_end : BEF_begin;
-
- SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch)
- << RangeLoc << BeginRange->getType() << *BEF;
- return Sema::FRS_DiagnosticIssued;
+ // Look up the non-member form of the member we didn't find, first.
+ // This way we prefer a "no viable 'end'" diagnostic over a "i found
+ // a 'begin' but ignored it because there was no member 'end'"
+ // diagnostic.
+ auto BuildNonmember = [&](
+ BeginEndFunction BEFFound, LookupResult &Found,
+ llvm::function_ref<Sema::ForRangeStatus()> BuildFound,
+ llvm::function_ref<Sema::ForRangeStatus()> BuildNotFound) {
+ LookupResult OldFound = std::move(Found);
+ Found.clear();
+
+ if (Sema::ForRangeStatus Result = BuildNotFound())
+ return Result;
+
+ switch (BuildFound()) {
+ case Sema::FRS_Success:
+ return Sema::FRS_Success;
+
+ case Sema::FRS_NoViableFunction:
+ SemaRef.Diag(BeginRange->getBeginLoc(), diag::err_for_range_invalid)
+ << BeginRange->getType() << BEFFound;
+ CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, BeginRange);
+ LLVM_FALLTHROUGH;
+
+ case Sema::FRS_DiagnosticIssued:
+ for (NamedDecl *D : OldFound) {
+ SemaRef.Diag(D->getLocation(),
+ diag::note_for_range_member_begin_end_ignored)
+ << BeginRange->getType() << BEFFound;
+ }
+ return Sema::FRS_DiagnosticIssued;
+ }
+ llvm_unreachable("unexpected ForRangeStatus");
+ };
+ if (BeginMemberLookup.empty())
+ return BuildNonmember(BEF_end, EndMemberLookup, BuildEnd, BuildBegin);
+ return BuildNonmember(BEF_begin, BeginMemberLookup, BuildBegin, BuildEnd);
}
} else {
// - otherwise, begin-expr and end-expr are begin(__range) and
// end(__range), respectively, where begin and end are looked up with
// argument-dependent lookup (3.4.2). For the purposes of this name
// lookup, namespace std is an associated namespace.
-
- }
-
- *BEF = BEF_begin;
- Sema::ForRangeStatus RangeStatus =
- SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, BeginNameInfo,
- BeginMemberLookup, CandidateSet,
- BeginRange, BeginExpr);
-
- if (RangeStatus != Sema::FRS_Success) {
- if (RangeStatus == Sema::FRS_DiagnosticIssued)
- SemaRef.Diag(BeginRange->getLocStart(), diag::note_in_for_range)
- << ColonLoc << BEF_begin << BeginRange->getType();
- return RangeStatus;
- }
- if (!CoawaitLoc.isInvalid()) {
- // FIXME: getCurScope() should not be used during template instantiation.
- // We should pick up the set of unqualified lookup results for operator
- // co_await during the initial parse.
- *BeginExpr = SemaRef.ActOnCoawaitExpr(SemaRef.getCurScope(), ColonLoc,
- BeginExpr->get());
- if (BeginExpr->isInvalid())
- return Sema::FRS_DiagnosticIssued;
- }
- if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
- diag::err_for_range_iter_deduction_failure)) {
- NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
- return Sema::FRS_DiagnosticIssued;
}
- *BEF = BEF_end;
- RangeStatus =
- SemaRef.BuildForRangeBeginEndCall(ColonLoc, ColonLoc, EndNameInfo,
- EndMemberLookup, CandidateSet,
- EndRange, EndExpr);
- if (RangeStatus != Sema::FRS_Success) {
- if (RangeStatus == Sema::FRS_DiagnosticIssued)
- SemaRef.Diag(EndRange->getLocStart(), diag::note_in_for_range)
- << ColonLoc << BEF_end << EndRange->getType();
- return RangeStatus;
- }
- if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
- diag::err_for_range_iter_deduction_failure)) {
- NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
- return Sema::FRS_DiagnosticIssued;
- }
- return Sema::FRS_Success;
+ if (Sema::ForRangeStatus Result = BuildBegin())
+ return Result;
+ return BuildEnd();
}
/// Speculatively attempt to dereference an invalid range expression.
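
The begin/end lookup is refactored into BuildBegin/BuildEnd lambdas so that, when only one of the two members is present, Sema first tries the non-member form of the missing one and prefers a diagnostic about that missing function. An illustrative (hypothetical) case with a member begin() and no end():

    struct R {
      int *begin();
      // intentionally no end()
    };

    void f(R r) {
      // With the restructured lookup the complaint is about the missing
      // 'end' (a "no viable 'end'" style diagnostic) rather than about the
      // member 'begin' that was found and then ignored.
      for (int x : r)
        (void)x;
    }
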
@@ -2213,6 +2266,7 @@ BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
SourceLocation ForLoc,
SourceLocation CoawaitLoc,
+ Stmt *InitStmt,
Stmt *LoopVarDecl,
SourceLocation ColonLoc,
Expr *Range,
@@ -2229,8 +2283,8 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
return StmtResult();
StmtResult SR = SemaRef.ActOnCXXForRangeStmt(
- S, ForLoc, CoawaitLoc, LoopVarDecl, ColonLoc, AdjustedRange.get(),
- RParenLoc, Sema::BFRK_Check);
+ S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc,
+ AdjustedRange.get(), RParenLoc, Sema::BFRK_Check);
if (SR.isInvalid())
return StmtResult();
}
@@ -2240,9 +2294,9 @@ static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
// case there are any other (non-fatal) problems with it.
SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
<< Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
- return SemaRef.ActOnCXXForRangeStmt(S, ForLoc, CoawaitLoc, LoopVarDecl,
- ColonLoc, AdjustedRange.get(), RParenLoc,
- Sema::BFRK_Rebuild);
+ return SemaRef.ActOnCXXForRangeStmt(
+ S, ForLoc, CoawaitLoc, InitStmt, LoopVarDecl, ColonLoc,
+ AdjustedRange.get(), RParenLoc, Sema::BFRK_Rebuild);
}
namespace {
@@ -2262,12 +2316,13 @@ struct InvalidateOnErrorScope {
}
/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
-StmtResult
-Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
- SourceLocation ColonLoc, Stmt *RangeDecl,
- Stmt *Begin, Stmt *End, Expr *Cond,
- Expr *Inc, Stmt *LoopVarDecl,
- SourceLocation RParenLoc, BuildForRangeKind Kind) {
+StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation CoawaitLoc, Stmt *InitStmt,
+ SourceLocation ColonLoc, Stmt *RangeDecl,
+ Stmt *Begin, Stmt *End, Expr *Cond,
+ Expr *Inc, Stmt *LoopVarDecl,
+ SourceLocation RParenLoc,
+ BuildForRangeKind Kind) {
// FIXME: This should not be used during template instantiation. We should
// pick up the set of unqualified lookup results for the != and + operators
// in the initial parse.
@@ -2451,8 +2506,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
QualType ArrayTy = PVD->getOriginalType();
QualType PointerTy = PVD->getType();
if (PointerTy->isPointerType() && ArrayTy->isArrayType()) {
- Diag(Range->getLocStart(), diag::err_range_on_array_parameter)
- << RangeLoc << PVD << ArrayTy << PointerTy;
+ Diag(Range->getBeginLoc(), diag::err_range_on_array_parameter)
+ << RangeLoc << PVD << ArrayTy << PointerTy;
Diag(PVD->getLocation(), diag::note_declared_at);
return StmtError();
}
@@ -2462,7 +2517,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
// If building the range failed, try dereferencing the range expression
// unless a diagnostic was issued or the end function is problematic.
StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
- CoawaitLoc,
+ CoawaitLoc, InitStmt,
LoopVarDecl, ColonLoc,
Range, RangeLoc,
RParenLoc);
@@ -2473,7 +2528,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
// Otherwise, emit diagnostics if we haven't already.
if (RangeStatus == FRS_NoViableFunction) {
Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
- Diag(Range->getLocStart(), diag::err_for_range_invalid)
+ Diag(Range->getBeginLoc(), diag::err_for_range_invalid)
<< RangeLoc << Range->getType() << BEFFailure;
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Range);
}
@@ -2519,7 +2574,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
if (!NotEqExpr.isInvalid())
NotEqExpr = CheckBooleanCondition(ColonLoc, NotEqExpr.get());
if (!NotEqExpr.isInvalid())
- NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get());
+ NotEqExpr =
+ ActOnFinishFullExpr(NotEqExpr.get(), /*DiscardedValue*/ false);
if (NotEqExpr.isInvalid()) {
Diag(RangeLoc, diag::note_for_range_invalid_iterator)
<< RangeLoc << 0 << BeginRangeRef.get()->getType();
@@ -2542,7 +2598,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
// co_await during the initial parse.
IncrExpr = ActOnCoawaitExpr(S, CoawaitLoc, IncrExpr.get());
if (!IncrExpr.isInvalid())
- IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
+ IncrExpr = ActOnFinishFullExpr(IncrExpr.get(), /*DiscardedValue*/ false);
if (IncrExpr.isInvalid()) {
Diag(RangeLoc, diag::note_for_range_invalid_iterator)
<< RangeLoc << 2 << BeginRangeRef.get()->getType() ;
@@ -2579,7 +2635,7 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc,
return StmtResult();
return new (Context) CXXForRangeStmt(
- RangeDS, cast_or_null<DeclStmt>(BeginDeclStmt.get()),
+ InitStmt, RangeDS, cast_or_null<DeclStmt>(BeginDeclStmt.get()),
cast_or_null<DeclStmt>(EndDeclStmt.get()), NotEqExpr.get(),
IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, CoawaitLoc,
ColonLoc, RParenLoc);
@@ -2660,7 +2716,7 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
NonReferenceType.removeLocalConst();
QualType NewReferenceType =
SemaRef.Context.getLValueReferenceType(E->getType().withConst());
- SemaRef.Diag(VD->getLocStart(), diag::note_use_type_or_non_reference)
+ SemaRef.Diag(VD->getBeginLoc(), diag::note_use_type_or_non_reference)
<< NonReferenceType << NewReferenceType << VD->getSourceRange();
} else {
// The range always returns a copy, so a temporary is always created.
@@ -2669,7 +2725,7 @@ static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
<< VD << RangeInitType;
QualType NonReferenceType = VariableType.getNonReferenceType();
NonReferenceType.removeLocalConst();
- SemaRef.Diag(VD->getLocStart(), diag::note_use_non_reference_type)
+ SemaRef.Diag(VD->getBeginLoc(), diag::note_use_non_reference_type)
<< NonReferenceType << VD->getSourceRange();
}
}
@@ -2705,7 +2761,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
// if doing so will prevent a copy.
SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
<< VD << VariableType << InitExpr->getType();
- SemaRef.Diag(VD->getLocStart(), diag::note_use_reference_type)
+ SemaRef.Diag(VD->getBeginLoc(), diag::note_use_reference_type)
<< SemaRef.Context.getLValueReferenceType(VariableType)
<< VD->getSourceRange();
}
@@ -2721,11 +2777,11 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
const CXXForRangeStmt *ForStmt) {
if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy,
- ForStmt->getLocStart()) &&
+ ForStmt->getBeginLoc()) &&
SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy,
- ForStmt->getLocStart()) &&
+ ForStmt->getBeginLoc()) &&
SemaRef.Diags.isIgnored(diag::warn_for_range_copy,
- ForStmt->getLocStart())) {
+ ForStmt->getBeginLoc())) {
return;
}
@@ -2797,7 +2853,7 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
return StmtError();
}
- ExprResult ExprRes = ActOnFinishFullExpr(E);
+ ExprResult ExprRes = ActOnFinishFullExpr(E, /*DiscardedValue*/ false);
if (ExprRes.isInvalid())
return StmtError();
E = ExprRes.get();
@@ -2951,7 +3007,7 @@ static void TryMoveInitialization(Sema& S,
Expr *InitExpr = &AsRvalue;
InitializationKind Kind = InitializationKind::CreateCopy(
- Value->getLocStart(), Value->getLocStart());
+ Value->getBeginLoc(), Value->getBeginLoc());
InitializationSequence Seq(S, Entity, Kind, InitExpr);
@@ -3147,12 +3203,14 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ExpressionEvaluationContext::DiscardedStatement &&
(HasDeducedReturnType || CurCap->HasImplicitReturnType)) {
if (RetValExp) {
- ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ ExprResult ER =
+ ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
if (ER.isInvalid())
return StmtError();
RetValExp = ER.get();
}
- return new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
}
if (HasDeducedReturnType) {
@@ -3273,13 +3331,14 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (RetValExp) {
- ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ ExprResult ER =
+ ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
if (ER.isInvalid())
return StmtError();
RetValExp = ER.get();
}
- ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
- NRVOCandidate);
+ auto *Result =
+ ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);
// If we need to check for the named return value optimization,
// or if we need to infer the return type,
@@ -3503,12 +3562,14 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ExpressionEvaluationContext::DiscardedStatement &&
FnRetType->getContainedAutoType()) {
if (RetValExp) {
- ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ ExprResult ER =
+ ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
if (ER.isInvalid())
return StmtError();
RetValExp = ER.get();
}
- return new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ return ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
}
// FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
@@ -3596,14 +3657,16 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (RetValExp) {
- ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ ExprResult ER =
+ ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
if (ER.isInvalid())
return StmtError();
RetValExp = ER.get();
}
}
- Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
+ Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp,
+ /* NRVOCandidate=*/nullptr);
} else if (!RetValExp && !HasDependentReturnType) {
FunctionDecl *FD = getCurFunctionDecl();
@@ -3625,7 +3688,8 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
else
Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
- Result = new (Context) ReturnStmt(ReturnLoc);
+ Result = ReturnStmt::Create(Context, ReturnLoc, /* RetExpr=*/nullptr,
+ /* NRVOCandidate=*/nullptr);
} else {
assert(RetValExp || HasDependentReturnType);
const VarDecl *NRVOCandidate = nullptr;
@@ -3673,12 +3737,13 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
if (RetValExp) {
- ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
+ ExprResult ER =
+ ActOnFinishFullExpr(RetValExp, ReturnLoc, /*DiscardedValue*/ false);
if (ER.isInvalid())
return StmtError();
RetValExp = ER.get();
}
- Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
+ Result = ReturnStmt::Create(Context, ReturnLoc, RetValExp, NRVOCandidate);
}
// If we need to check for the named return value optimization, save the
@@ -3726,7 +3791,7 @@ StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
if (Result.isInvalid())
return StmtError();
- Result = ActOnFinishFullExpr(Result.get());
+ Result = ActOnFinishFullExpr(Result.get(), /*DiscardedValue*/ false);
if (Result.isInvalid())
return StmtError();
Throw = Result.get();
@@ -3798,7 +3863,7 @@ Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
}
// The operand to @synchronized is a full-expression.
- return ActOnFinishFullExpr(operand);
+ return ActOnFinishFullExpr(operand, /*DiscardedValue*/ false);
}
StmtResult
@@ -3969,7 +4034,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// declarations that are invalid, since we can't usefully report on them.
if (!H->getExceptionDecl()) {
if (i < NumHandlers - 1)
- return StmtError(Diag(H->getLocStart(), diag::err_early_catch_all));
+ return StmtError(Diag(H->getBeginLoc(), diag::err_early_catch_all));
continue;
} else if (H->getExceptionDecl()->isInvalidDecl())
continue;
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index 0db15ea1f646..9e084c99d0dd 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -27,6 +27,58 @@
using namespace clang;
using namespace sema;
+/// Remove the upper-level LValueToRValue cast from an expression.
+static void removeLValueToRValueCast(Expr *E) {
+ Expr *Parent = E;
+ Expr *ExprUnderCast = nullptr;
+ SmallVector<Expr *, 8> ParentsToUpdate;
+
+ while (true) {
+ ParentsToUpdate.push_back(Parent);
+ if (auto *ParenE = dyn_cast<ParenExpr>(Parent)) {
+ Parent = ParenE->getSubExpr();
+ continue;
+ }
+
+ Expr *Child = nullptr;
+ CastExpr *ParentCast = dyn_cast<CastExpr>(Parent);
+ if (ParentCast)
+ Child = ParentCast->getSubExpr();
+ else
+ return;
+
+ if (auto *CastE = dyn_cast<CastExpr>(Child))
+ if (CastE->getCastKind() == CK_LValueToRValue) {
+ ExprUnderCast = CastE->getSubExpr();
+ // LValueToRValue cast inside GCCAsmStmt requires an explicit cast.
+ ParentCast->setSubExpr(ExprUnderCast);
+ break;
+ }
+ Parent = Child;
+ }
+
+ // Update parent expressions to have same ValueType as the underlying.
+ assert(ExprUnderCast &&
+ "Should be reachable only if LValueToRValue cast was found!");
+ auto ValueKind = ExprUnderCast->getValueKind();
+ for (Expr *E : ParentsToUpdate)
+ E->setValueKind(ValueKind);
+}
+
+/// Emit a warning about usage of "noop"-like casts for lvalues (GNU extension)
+/// and fix the argument by removing the LValueToRValue cast from the expression.
+static void emitAndFixInvalidAsmCastLValue(const Expr *LVal, Expr *BadArgument,
+ Sema &S) {
+ if (!S.getLangOpts().HeinousExtensions) {
+ S.Diag(LVal->getBeginLoc(), diag::err_invalid_asm_cast_lvalue)
+ << BadArgument->getSourceRange();
+ } else {
+ S.Diag(LVal->getBeginLoc(), diag::warn_invalid_asm_cast_lvalue)
+ << BadArgument->getSourceRange();
+ }
+ removeLValueToRValueCast(BadArgument);
+}
+
/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
/// ignore "noop" casts in places where an lvalue is required by an inline asm.
/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
@@ -34,7 +86,7 @@ using namespace sema;
///
/// This method checks to see if the argument is an acceptable l-value and
/// returns false if it is a case we can handle.
-static bool CheckAsmLValue(const Expr *E, Sema &S) {
+static bool CheckAsmLValue(Expr *E, Sema &S) {
// Type dependent expressions will be checked during instantiation.
if (E->isTypeDependent())
return false;
@@ -46,12 +98,7 @@ static bool CheckAsmLValue(const Expr *E, Sema &S) {
// are supposed to allow.
const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
if (E != E2 && E2->isLValue()) {
- if (!S.getLangOpts().HeinousExtensions)
- S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
- << E->getSourceRange();
- else
- S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
- << E->getSourceRange();
+ emitAndFixInvalidAsmCastLValue(E2, E, S);
// Accept, even if we emitted an error diagnostic.
return false;
}
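
emitAndFixInvalidAsmCastLValue and removeLValueToRValueCast factor out the -fheinous-gnu-extensions handling used both here in CheckAsmLValue and in the output-operand path further down: the "noop" cast is diagnosed, and its LValueToRValue cast is stripped so the operand is treated as the underlying lvalue. An illustrative (hypothetical) input:

    // The cast turns the output operand into an rvalue even though "=r"
    // needs an lvalue. This is an error by default and a warning (with the
    // LValueToRValue cast removed) under -fheinous-gnu-extensions.
    void demo() {
      int a = 0;
      __asm__("nop" : "=r"((unsigned)a));
    }
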
@@ -90,13 +137,13 @@ static bool CheckNakedParmReference(Expr *E, Sema &S) {
while (WorkList.size()) {
Expr *E = WorkList.pop_back_val();
if (isa<CXXThisExpr>(E)) {
- S.Diag(E->getLocStart(), diag::err_asm_naked_this_ref);
+ S.Diag(E->getBeginLoc(), diag::err_asm_naked_this_ref);
S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
return true;
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
if (isa<ParmVarDecl>(DRE->getDecl())) {
- S.Diag(DRE->getLocStart(), diag::err_asm_naked_parm_ref);
+ S.Diag(DRE->getBeginLoc(), diag::err_asm_naked_parm_ref);
S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
return true;
}
@@ -131,7 +178,7 @@ static bool checkExprMemoryConstraintCompat(Sema &S, Expr *E,
EType = ExprGlobalRegVar;
if (EType != ExprSafeType) {
- S.Diag(E->getLocStart(), diag::err_asm_non_addr_value_in_memory_constraint)
+ S.Diag(E->getBeginLoc(), diag::err_asm_non_addr_value_in_memory_constraint)
<< EType << is_input_expr << Info.getConstraintStr()
<< E->getSourceRange();
return true;
@@ -185,7 +232,7 @@ getClobberConflictLocation(MultiExprArg Exprs, StringLiteral **Constraints,
Clobber = Target.getNormalizedGCCRegisterName(Clobber, true);
// Go over the output's registers we collected
if (InOutVars.count(Clobber))
- return Clobbers[i]->getLocStart();
+ return Clobbers[i]->getBeginLoc();
}
return SourceLocation();
}
@@ -226,9 +273,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
if (!Context.getTargetInfo().validateOutputConstraint(Info))
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_invalid_output_constraint)
- << Info.getConstraintStr());
+ return StmtError(
+ Diag(Literal->getBeginLoc(), diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr());
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
if (ER.isInvalid())
@@ -264,24 +311,18 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
break;
case Expr::MLV_LValueCast: {
const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
- if (!getLangOpts().HeinousExtensions) {
- Diag(LVal->getLocStart(), diag::err_invalid_asm_cast_lvalue)
- << OutputExpr->getSourceRange();
- } else {
- Diag(LVal->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
- << OutputExpr->getSourceRange();
- }
+ emitAndFixInvalidAsmCastLValue(LVal, OutputExpr, *this);
// Accept, even if we emitted an error diagnostic.
break;
}
case Expr::MLV_IncompleteType:
case Expr::MLV_IncompleteVoidType:
- if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
+ if (RequireCompleteType(OutputExpr->getBeginLoc(), Exprs[i]->getType(),
diag::err_dereference_incomplete_type))
return StmtError();
LLVM_FALLTHROUGH;
default:
- return StmtError(Diag(OutputExpr->getLocStart(),
+ return StmtError(Diag(OutputExpr->getBeginLoc(),
diag::err_asm_invalid_lvalue_in_output)
<< OutputExpr->getSourceRange());
}
@@ -289,9 +330,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
unsigned Size = Context.getTypeSize(OutputExpr->getType());
if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
Size))
- return StmtError(Diag(OutputExpr->getLocStart(),
- diag::err_asm_invalid_output_size)
- << Info.getConstraintStr());
+ return StmtError(
+ Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_output_size)
+ << Info.getConstraintStr());
}
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
@@ -307,9 +348,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos,
Info)) {
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_invalid_input_constraint)
- << Info.getConstraintStr());
+ return StmtError(
+ Diag(Literal->getBeginLoc(), diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr());
}
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
@@ -331,22 +372,23 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Only allow void types for memory constraints.
if (Info.allowsMemory() && !Info.allowsRegister()) {
if (CheckAsmLValue(InputExpr, *this))
- return StmtError(Diag(InputExpr->getLocStart(),
+ return StmtError(Diag(InputExpr->getBeginLoc(),
diag::err_asm_invalid_lvalue_in_input)
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
} else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
if (!InputExpr->isValueDependent()) {
- llvm::APSInt Result;
- if (!InputExpr->EvaluateAsInt(Result, Context))
- return StmtError(
- Diag(InputExpr->getLocStart(), diag::err_asm_immediate_expected)
- << Info.getConstraintStr() << InputExpr->getSourceRange());
- if (!Info.isValidAsmImmediate(Result))
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_invalid_asm_value_for_constraint)
- << Result.toString(10) << Info.getConstraintStr()
- << InputExpr->getSourceRange());
+ Expr::EvalResult EVResult;
+ if (!InputExpr->EvaluateAsRValue(EVResult, Context, true))
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_immediate_expected)
+ << Info.getConstraintStr() << InputExpr->getSourceRange());
+ llvm::APSInt Result = EVResult.Val.getInt();
+ if (!Info.isValidAsmImmediate(Result))
+ return StmtError(Diag(InputExpr->getBeginLoc(),
+ diag::err_invalid_asm_value_for_constraint)
+ << Result.toString(10) << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
}
} else {
@@ -359,10 +401,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (Info.allowsRegister()) {
if (InputExpr->getType()->isVoidType()) {
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_asm_invalid_type_in_input)
- << InputExpr->getType() << Info.getConstraintStr()
- << InputExpr->getSourceRange());
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
}
}
@@ -373,16 +415,16 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
continue;
if (!Ty->isVoidType() || !Info.allowsMemory())
- if (RequireCompleteType(InputExpr->getLocStart(), Exprs[i]->getType(),
+ if (RequireCompleteType(InputExpr->getBeginLoc(), Exprs[i]->getType(),
diag::err_dereference_incomplete_type))
return StmtError();
unsigned Size = Context.getTypeSize(Ty);
if (!Context.getTargetInfo().validateInputSize(Literal->getString(),
Size))
- return StmtError(Diag(InputExpr->getLocStart(),
- diag::err_asm_invalid_input_size)
- << Info.getConstraintStr());
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_input_size)
+ << Info.getConstraintStr());
}
// Check that the clobbers are valid.
@@ -393,8 +435,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
StringRef Clobber = Literal->getString();
if (!Context.getTargetInfo().isValidClobber(Clobber))
- return StmtError(Diag(Literal->getLocStart(),
- diag::err_asm_unknown_register_name) << Clobber);
+ return StmtError(
+ Diag(Literal->getBeginLoc(), diag::err_asm_unknown_register_name)
+ << Clobber);
}
GCCAsmStmt *NS =
@@ -446,7 +489,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (!Context.getTargetInfo().validateConstraintModifier(
Literal->getString(), Piece.getModifier(), Size,
SuggestedModifier)) {
- Diag(Exprs[ConstraintIdx]->getLocStart(),
+ Diag(Exprs[ConstraintIdx]->getBeginLoc(),
diag::warn_asm_mismatched_size_modifier);
if (!SuggestedModifier.empty()) {
@@ -469,7 +512,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (NumAlternatives == ~0U)
NumAlternatives = AltCount;
else if (NumAlternatives != AltCount)
- return StmtError(Diag(NS->getOutputExpr(i)->getLocStart(),
+ return StmtError(Diag(NS->getOutputExpr(i)->getBeginLoc(),
diag::err_asm_unexpected_constraint_alternatives)
<< NumAlternatives << AltCount);
}
@@ -482,7 +525,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (NumAlternatives == ~0U)
NumAlternatives = AltCount;
else if (NumAlternatives != AltCount)
- return StmtError(Diag(NS->getInputExpr(i)->getLocStart(),
+ return StmtError(Diag(NS->getInputExpr(i)->getBeginLoc(),
diag::err_asm_unexpected_constraint_alternatives)
<< NumAlternatives << AltCount);
@@ -499,10 +542,10 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Make sure no more than one input constraint matches each output.
assert(TiedTo < InputMatchedToOutput.size() && "TiedTo value out of range");
if (InputMatchedToOutput[TiedTo] != ~0U) {
- Diag(NS->getInputExpr(i)->getLocStart(),
+ Diag(NS->getInputExpr(i)->getBeginLoc(),
diag::err_asm_input_duplicate_match)
<< TiedTo;
- Diag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getLocStart(),
+ Diag(NS->getInputExpr(InputMatchedToOutput[TiedTo])->getBeginLoc(),
diag::note_asm_input_duplicate_first)
<< TiedTo;
return StmtError();
@@ -590,10 +633,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
continue;
}
- Diag(InputExpr->getLocStart(),
- diag::err_asm_tying_incompatible_types)
- << InTy << OutTy << OutputExpr->getSourceRange()
- << InputExpr->getSourceRange();
+ Diag(InputExpr->getBeginLoc(), diag::err_asm_tying_incompatible_types)
+ << InTy << OutTy << OutputExpr->getSourceRange()
+ << InputExpr->getSourceRange();
return StmtError();
}
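
For reference, the rewritten inline-asm checks above cover code along these lines (an illustrative sketch, not taken from the patch's tests; the function names are invented):

    int i;

    // An l-value cast used as an asm output: rejected by default, accepted with
    // only a warning under -fheinous-gnu-extensions, now via the shared
    // emitAndFixInvalidAsmCastLValue() helper in both call sites.
    void cast_output() {
      __asm__("" : "=r"((int)i));
    }

    // An input with an immediate constraint must fold to an integer constant;
    // the operand is now evaluated as an rvalue (EvaluateAsRValue) before
    // isValidAsmImmediate() is consulted.
    void immediate_input() {
      __asm__ volatile("" : : "i"(4 + 4));
    }
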
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index e39a65c6ce0c..a8e54b36b29b 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -16,7 +16,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
-#include "clang/Sema/LoopHint.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/StringExtras.h"
@@ -29,7 +28,7 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
A.getAttributeSpellingListIndex());
if (!isa<NullStmt>(St)) {
S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_wrong_target)
- << Attr.getSpelling() << St->getLocStart();
+ << Attr.getSpelling() << St->getBeginLoc();
if (isa<SwitchCase>(St)) {
SourceLocation L = S.getLocForEndOfToken(Range.getEnd());
S.Diag(L, diag::note_fallthrough_insert_semi_fixit)
@@ -56,8 +55,7 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
static Attr *handleSuppressAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
if (A.getNumArgs() < 1) {
- S.Diag(A.getLoc(), diag::err_attribute_too_few_arguments)
- << A.getName() << 1;
+ S.Diag(A.getLoc(), diag::err_attribute_too_few_arguments) << A << 1;
return nullptr;
}
@@ -87,6 +85,9 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
bool PragmaUnroll = PragmaNameLoc->Ident->getName() == "unroll";
bool PragmaNoUnroll = PragmaNameLoc->Ident->getName() == "nounroll";
+ bool PragmaUnrollAndJam = PragmaNameLoc->Ident->getName() == "unroll_and_jam";
+ bool PragmaNoUnrollAndJam =
+ PragmaNameLoc->Ident->getName() == "nounroll_and_jam";
if (St->getStmtClass() != Stmt::DoStmtClass &&
St->getStmtClass() != Stmt::ForStmtClass &&
St->getStmtClass() != Stmt::CXXForRangeStmtClass &&
@@ -95,8 +96,10 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
llvm::StringSwitch<const char *>(PragmaNameLoc->Ident->getName())
.Case("unroll", "#pragma unroll")
.Case("nounroll", "#pragma nounroll")
+ .Case("unroll_and_jam", "#pragma unroll_and_jam")
+ .Case("nounroll_and_jam", "#pragma nounroll_and_jam")
.Default("#pragma clang loop");
- S.Diag(St->getLocStart(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
+ S.Diag(St->getBeginLoc(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
return nullptr;
}
@@ -118,6 +121,20 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
Option = LoopHintAttr::Unroll;
State = LoopHintAttr::Enable;
}
+ } else if (PragmaNoUnrollAndJam) {
+ // #pragma nounroll_and_jam
+ Option = LoopHintAttr::UnrollAndJam;
+ State = LoopHintAttr::Disable;
+ } else if (PragmaUnrollAndJam) {
+ if (ValueExpr) {
+ // #pragma unroll_and_jam N
+ Option = LoopHintAttr::UnrollAndJamCount;
+ State = LoopHintAttr::Numeric;
+ } else {
+ // #pragma unroll_and_jam
+ Option = LoopHintAttr::UnrollAndJam;
+ State = LoopHintAttr::Enable;
+ }
} else {
// #pragma clang loop ...
assert(OptionLoc && OptionLoc->Ident &&
@@ -130,19 +147,24 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
.Case("interleave_count", LoopHintAttr::InterleaveCount)
.Case("unroll", LoopHintAttr::Unroll)
.Case("unroll_count", LoopHintAttr::UnrollCount)
+ .Case("pipeline", LoopHintAttr::PipelineDisabled)
+ .Case("pipeline_initiation_interval",
+ LoopHintAttr::PipelineInitiationInterval)
.Case("distribute", LoopHintAttr::Distribute)
.Default(LoopHintAttr::Vectorize);
if (Option == LoopHintAttr::VectorizeWidth ||
Option == LoopHintAttr::InterleaveCount ||
- Option == LoopHintAttr::UnrollCount) {
+ Option == LoopHintAttr::UnrollCount ||
+ Option == LoopHintAttr::PipelineInitiationInterval) {
assert(ValueExpr && "Attribute must have a valid value expression.");
- if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart()))
+ if (S.CheckLoopHintExpr(ValueExpr, St->getBeginLoc()))
return nullptr;
State = LoopHintAttr::Numeric;
} else if (Option == LoopHintAttr::Vectorize ||
Option == LoopHintAttr::Interleave ||
Option == LoopHintAttr::Unroll ||
- Option == LoopHintAttr::Distribute) {
+ Option == LoopHintAttr::Distribute ||
+ Option == LoopHintAttr::PipelineDisabled) {
assert(StateLoc && StateLoc->Ident && "Loop hint must have an argument");
if (StateLoc->Ident->isStr("disable"))
State = LoopHintAttr::Disable;
@@ -165,21 +187,20 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
static void
CheckForIncompatibleAttributes(Sema &S,
const SmallVectorImpl<const Attr *> &Attrs) {
- // There are 4 categories of loop hints attributes: vectorize, interleave,
- // unroll and distribute. Except for distribute they come in two variants: a
- // state form and a numeric form. The state form selectively
- // defaults/enables/disables the transformation for the loop (for unroll,
- // default indicates full unrolling rather than enabling the transformation).
- // The numeric form form provides an integer hint (for example, unroll count)
- // to the transformer. The following array accumulates the hints encountered
- // while iterating through the attributes to check for compatibility.
+ // There are 6 categories of loop hints attributes: vectorize, interleave,
+ // unroll, unroll_and_jam, pipeline and distribute. Except for distribute they
+ // come in two variants: a state form and a numeric form. The state form
+ // selectively defaults/enables/disables the transformation for the loop
+ // (for unroll, default indicates full unrolling rather than enabling the
+ // transformation). The numeric form provides an integer hint (for

+ // example, unroll count) to the transformer. The following array accumulates
+ // the hints encountered while iterating through the attributes to check for
+ // compatibility.
struct {
const LoopHintAttr *StateAttr;
const LoopHintAttr *NumericAttr;
- } HintAttrs[] = {{nullptr, nullptr},
- {nullptr, nullptr},
- {nullptr, nullptr},
- {nullptr, nullptr}};
+ } HintAttrs[] = {{nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr},
+ {nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr}};
for (const auto *I : Attrs) {
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(I);
@@ -189,7 +210,14 @@ CheckForIncompatibleAttributes(Sema &S,
continue;
LoopHintAttr::OptionType Option = LH->getOption();
- enum { Vectorize, Interleave, Unroll, Distribute } Category;
+ enum {
+ Vectorize,
+ Interleave,
+ Unroll,
+ UnrollAndJam,
+ Distribute,
+ Pipeline
+ } Category;
switch (Option) {
case LoopHintAttr::Vectorize:
case LoopHintAttr::VectorizeWidth:
@@ -203,16 +231,27 @@ CheckForIncompatibleAttributes(Sema &S,
case LoopHintAttr::UnrollCount:
Category = Unroll;
break;
+ case LoopHintAttr::UnrollAndJam:
+ case LoopHintAttr::UnrollAndJamCount:
+ Category = UnrollAndJam;
+ break;
case LoopHintAttr::Distribute:
// Perform the check for duplicated 'distribute' hints.
Category = Distribute;
break;
+ case LoopHintAttr::PipelineDisabled:
+ case LoopHintAttr::PipelineInitiationInterval:
+ Category = Pipeline;
+ break;
};
+ assert(Category < sizeof(HintAttrs) / sizeof(HintAttrs[0]));
auto &CategoryState = HintAttrs[Category];
const LoopHintAttr *PrevAttr;
if (Option == LoopHintAttr::Vectorize ||
Option == LoopHintAttr::Interleave || Option == LoopHintAttr::Unroll ||
+ Option == LoopHintAttr::UnrollAndJam ||
+ Option == LoopHintAttr::PipelineDisabled ||
Option == LoopHintAttr::Distribute) {
// Enable|Disable|AssumeSafety hint. For example, vectorize(enable).
PrevAttr = CategoryState.StateAttr;
@@ -232,7 +271,7 @@ CheckForIncompatibleAttributes(Sema &S,
<< LH->getDiagnosticName(Policy);
if (CategoryState.StateAttr && CategoryState.NumericAttr &&
- (Category == Unroll ||
+ (Category == Unroll || Category == UnrollAndJam ||
CategoryState.StateAttr->getState() == LoopHintAttr::Disable)) {
// Disable hints are not compatible with numeric hints of the same
// category. As a special case, numeric unroll hints are also not
@@ -257,8 +296,7 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
unsigned NumArgs = A.getNumArgs();
if (NumArgs > 1) {
- S.Diag(A.getLoc(), diag::err_attribute_too_many_arguments) << A.getName()
- << 1;
+ S.Diag(A.getLoc(), diag::err_attribute_too_many_arguments) << A << 1;
return nullptr;
}
@@ -270,7 +308,7 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
if (!E->isIntegerConstantExpr(ArgVal, S.Context)) {
S.Diag(A.getLoc(), diag::err_attribute_argument_type)
- << A.getName() << AANT_ArgumentIntegerConstant << E->getSourceRange();
+ << A << AANT_ArgumentIntegerConstant << E->getSourceRange();
return nullptr;
}
@@ -279,7 +317,7 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
if (Val <= 0) {
S.Diag(A.getRange().getBegin(),
diag::err_attribute_requires_positive_integer)
- << A.getName();
+ << A << /* positive */ 0;
return nullptr;
}
UnrollFactor = Val;
@@ -292,9 +330,10 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
switch (A.getKind()) {
case ParsedAttr::UnknownAttribute:
- S.Diag(A.getLoc(), A.isDeclspecAttribute() ?
- diag::warn_unhandled_ms_attribute_ignored :
- diag::warn_unknown_attribute_ignored) << A.getName();
+ S.Diag(A.getLoc(), A.isDeclspecAttribute()
+ ? (unsigned)diag::warn_unhandled_ms_attribute_ignored
+ : (unsigned)diag::warn_unknown_attribute_ignored)
+ << A.getName();
return nullptr;
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
@@ -308,7 +347,7 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
// if we're here, then we parsed a known attribute, but didn't recognize
// it as a statement attribute => it is declaration attribute
S.Diag(A.getRange().getBegin(), diag::err_decl_attribute_invalid_on_stmt)
- << A.getName() << St->getLocStart();
+ << A.getName() << St->getBeginLoc();
return nullptr;
}
}
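
For reference, the pragmas wired up above are spelled as follows in user code (an illustrative sketch; the loop bodies and names are invented, and the spellings follow the comments in the handler and clang's existing unroll pragmas):

    void scale(float *a, int n, int m) {
      // Outer-loop unroll-and-jam, with and without an explicit factor.
      #pragma unroll_and_jam(4)
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
          a[i * m + j] *= 2.0f;

      #pragma nounroll_and_jam
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < m; ++j)
          a[i * m + j] += 1.0f;

      // Software-pipelining hints via the '#pragma clang loop' spelling.
      #pragma clang loop pipeline(disable)
      for (int i = 0; i < n; ++i)
        a[i] += 1.0f;

      #pragma clang loop pipeline_initiation_interval(2)
      for (int i = 0; i < n; ++i)
        a[i] *= 0.5f;
    }
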
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index fa002de3f5f1..3f9dc989103f 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -194,7 +194,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
QualType ObjectType = ObjectTypePtr.get();
- LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
+ LookupResult R(*this, TName, Name.getBeginLoc(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization))
return TNK_Non_template;
@@ -539,9 +539,8 @@ void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
// If this is a dependent-scope lookup, diagnose that the 'template' keyword
// was missing.
if (MissingTemplateKeyword) {
- Diag(NameInfo.getLocStart(), diag::err_template_kw_missing)
- << "" << NameInfo.getName().getAsString()
- << SourceRange(Less, Greater);
+ Diag(NameInfo.getBeginLoc(), diag::err_template_kw_missing)
+ << "" << NameInfo.getName().getAsString() << SourceRange(Less, Greater);
return;
}
@@ -628,7 +627,7 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->isInstance()) {
- QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
+ QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType();
// Since the 'this' expression is synthesized, we don't need to
// perform the double-lookup check.
@@ -892,7 +891,7 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
// convertTypeTemplateArgumentToTemplate.
return ParsedTemplateArgument(ParsedTemplateArgument::Type,
ParsedType.get().getAsOpaquePtr(),
- TInfo->getTypeLoc().getLocStart());
+ TInfo->getTypeLoc().getBeginLoc());
}
/// ActOnTypeParameter - Called when a C++ template type parameter
@@ -974,7 +973,7 @@ NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc) {
if (TSI->getType()->isUndeducedType()) {
- // C++1z [temp.dep.expr]p3:
+ // C++17 [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains
// - an identifier associated by name lookup with a non-type
// template-parameter declared with a type that contains a
@@ -1113,12 +1112,10 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
IdentifierInfo *ParamName = D.getIdentifier();
bool IsParameterPack = D.hasEllipsis();
- NonTypeTemplateParmDecl *Param
- = NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
- D.getLocStart(),
- D.getIdentifierLoc(),
- Depth, Position, ParamName, T,
- IsParameterPack, TInfo);
+ NonTypeTemplateParmDecl *Param = NonTypeTemplateParmDecl::Create(
+ Context, Context.getTranslationUnitDecl(), D.getBeginLoc(),
+ D.getIdentifierLoc(), Depth, Position, ParamName, T, IsParameterPack,
+ TInfo);
Param->setAccess(AS_public);
if (Invalid)
@@ -1258,9 +1255,10 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
RAngleLoc, RequiresClause);
}
-static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
+static void SetNestedNameSpecifier(Sema &S, TagDecl *T,
+ const CXXScopeSpec &SS) {
if (SS.isSet())
- T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext()));
+ T->setQualifierInfo(SS.getWithLocInContext(S.Context));
}
DeclResult Sema::CheckClassTemplate(
@@ -1459,10 +1457,11 @@ DeclResult Sema::CheckClassTemplate(
}();
if (RedeclACMismatch) {
- Diag(CurAC ? CurAC->getLocStart() : NameLoc,
+ Diag(CurAC ? CurAC->getBeginLoc() : NameLoc,
diag::err_template_different_associated_constraints);
- Diag(PrevAC ? PrevAC->getLocStart() : PrevClassTemplate->getLocation(),
- diag::note_template_prev_declaration) << /*declaration*/0;
+ Diag(PrevAC ? PrevAC->getBeginLoc() : PrevClassTemplate->getLocation(),
+ diag::note_template_prev_declaration)
+ << /*declaration*/ 0;
return true;
}
@@ -1489,19 +1488,19 @@ DeclResult Sema::CheckClassTemplate(
NamedDecl *Hidden = nullptr;
if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
SkipBody->ShouldSkip = true;
+ SkipBody->Previous = Def;
auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate();
assert(Tmpl && "original definition of a class template is not a "
"class template?");
makeMergedDefinitionVisible(Hidden);
makeMergedDefinitionVisible(Tmpl);
- return Def;
+ } else {
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // FIXME: Would it make sense to try to "forget" the previous
+ // definition, as part of error recovery?
+ return true;
}
-
- Diag(NameLoc, diag::err_redefinition) << Name;
- Diag(Def->getLocation(), diag::note_previous_definition);
- // FIXME: Would it make sense to try to "forget" the previous
- // definition, as part of error recovery?
- return true;
}
}
} else if (PrevDecl) {
@@ -1522,13 +1521,14 @@ DeclResult Sema::CheckClassTemplate(
if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
- PrevClassTemplate ? PrevClassTemplate->getTemplateParameters()
- : nullptr,
+ PrevClassTemplate
+ ? PrevClassTemplate->getMostRecentDecl()->getTemplateParameters()
+ : nullptr,
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
- : TUK == TUK_Friend ? TPC_FriendClassTemplate
- : TPC_ClassTemplate))
+ : TUK == TUK_Friend ? TPC_FriendClassTemplate : TPC_ClassTemplate,
+ SkipBody))
Invalid = true;
if (SS.isSet()) {
@@ -1555,7 +1555,7 @@ DeclResult Sema::CheckClassTemplate(
PrevClassTemplate && ShouldAddRedecl ?
PrevClassTemplate->getTemplatedDecl() : nullptr,
/*DelayTypeCreation=*/true);
- SetNestedNameSpecifier(NewClass, SS);
+ SetNestedNameSpecifier(*this, NewClass, SS);
if (NumOuterTemplateParamLists > 0)
NewClass->setTemplateParameterListsInfo(
Context, llvm::makeArrayRef(OuterTemplateParamLists,
@@ -1563,7 +1563,7 @@ DeclResult Sema::CheckClassTemplate(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition) {
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(NewClass);
AddMsStructLayoutForRecord(NewClass);
}
@@ -1606,7 +1606,7 @@ DeclResult Sema::CheckClassTemplate(
NewClass->setLexicalDeclContext(CurContext);
NewTemplate->setLexicalDeclContext(CurContext);
- if (TUK == TUK_Definition)
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
NewClass->startDefinition();
ProcessDeclAttributeList(S, NewClass, Attr);
@@ -1655,6 +1655,9 @@ DeclResult Sema::CheckClassTemplate(
ActOnDocumentableDecl(NewTemplate);
+ if (SkipBody && SkipBody->ShouldSkip)
+ return SkipBody->Previous;
+
return NewTemplate;
}
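
The SkipBody plumbing above changes what happens when an equivalent class template definition has already been made visible from a module: the second body is skipped and the prior merged definition (SkipBody->Previous) is reused rather than diagnosed. A rough sketch of the scenario, with the module map elided and all names invented:

    // box_a.h and box_b.h, mapped to two different modules, both contain:
    //   template <typename T> struct Box { T value; };

    // user.cpp, built with -fmodules so both definitions become visible:
    #include "box_a.h"
    #include "box_b.h"   // body skipped, merged with the first definition
    Box<int> b;           // uses the single merged definition
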
@@ -1763,8 +1766,8 @@ struct ConvertConstructorToDeductionGuideTransform {
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
return buildDeductionGuide(TemplateParams, CD->isExplicit(), NewTInfo,
- CD->getLocStart(), CD->getLocation(),
- CD->getLocEnd());
+ CD->getBeginLoc(), CD->getLocation(),
+ CD->getEndLoc());
}
/// Build a deduction guide with the specified parameter types.
@@ -1806,8 +1809,8 @@ private:
// TemplateTypeParmDecl's index cannot be changed after creation, so
// substitute it directly.
auto *NewTTP = TemplateTypeParmDecl::Create(
- SemaRef.Context, DC, TTP->getLocStart(), TTP->getLocation(),
- /*Depth*/0, Depth1IndexAdjustment + TTP->getIndex(),
+ SemaRef.Context, DC, TTP->getBeginLoc(), TTP->getLocation(),
+ /*Depth*/ 0, Depth1IndexAdjustment + TTP->getIndex(),
TTP->getIdentifier(), TTP->wasDeclaredWithTypename(),
TTP->isParameterPack());
if (TTP->hasDefaultArgument()) {
@@ -1876,7 +1879,7 @@ private:
EPI.HasTrailingReturn = true;
QualType Result = SemaRef.BuildFunctionType(
- ReturnType, ParamTypes, TL.getLocStart(), DeductionGuideName, EPI);
+ ReturnType, ParamTypes, TL.getBeginLoc(), DeductionGuideName, EPI);
if (Result.isNull())
return QualType();
@@ -2152,10 +2155,17 @@ static bool DiagnoseUnexpandedParameterPacks(Sema &S,
/// \param TPC Describes the context in which we are checking the given
/// template parameter list.
///
+/// \param SkipBody If we might have already made a prior merged definition
+/// of this template visible, the corresponding body-skipping information.
+/// Default argument redefinition is not an error when skipping such a body,
+/// because (under the ODR) we can assume the default arguments are the same
+/// as the prior merged definition.
+///
/// \returns true if an error occurred, false otherwise.
bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
- TemplateParamListContext TPC) {
+ TemplateParamListContext TPC,
+ SkipBodyInfo *SkipBody) {
bool Invalid = false;
// C++ [temp.param]p10:
@@ -2205,7 +2215,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
"Parameter packs can't have a default argument!");
SawParameterPack = true;
} else if (OldTypeParm && hasVisibleDefaultArgument(OldTypeParm) &&
- NewTypeParm->hasDefaultArgument()) {
+ NewTypeParm->hasDefaultArgument() &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
@@ -2249,7 +2260,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
if (!NewNonTypeParm->isPackExpansion())
SawParameterPack = true;
} else if (OldNonTypeParm && hasVisibleDefaultArgument(OldNonTypeParm) &&
- NewNonTypeParm->hasDefaultArgument()) {
+ NewNonTypeParm->hasDefaultArgument() &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
@@ -2292,7 +2304,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
SawParameterPack = true;
} else if (OldTemplateParm &&
hasVisibleDefaultArgument(OldTemplateParm) &&
- NewTemplateParm->hasDefaultArgument()) {
+ NewTemplateParm->hasDefaultArgument() &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
SawDefaultArgument = true;
@@ -3040,8 +3053,42 @@ static Expr *lookThroughRangesV3Condition(Preprocessor &PP, Expr *Cond) {
return Cond;
}
+namespace {
+
+// A PrinterHelper that prints more helpful diagnostics for some sub-expressions
+// within a failing boolean expression, such as substituting template parameters
+// for actual types.
+class FailedBooleanConditionPrinterHelper : public PrinterHelper {
+public:
+ explicit FailedBooleanConditionPrinterHelper(const PrintingPolicy &P)
+ : Policy(P) {}
+
+ bool handledStmt(Stmt *E, raw_ostream &OS) override {
+ const auto *DR = dyn_cast<DeclRefExpr>(E);
+ if (DR && DR->getQualifier()) {
+ // If this is a qualified name, expand the template arguments in nested
+ // qualifiers.
+ DR->getQualifier()->print(OS, Policy, true);
+ // Then print the decl itself.
+ const ValueDecl *VD = DR->getDecl();
+ OS << VD->getName();
+ if (const auto *IV = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
+ // This is a template variable, print the expanded template arguments.
+ printTemplateArgumentList(OS, IV->getTemplateArgs().asArray(), Policy);
+ }
+ return true;
+ }
+ return false;
+ }
+
+private:
+ const PrintingPolicy Policy;
+};
+
+} // end anonymous namespace
+
std::pair<Expr *, std::string>
-Sema::findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond) {
+Sema::findFailedBooleanCondition(Expr *Cond) {
Cond = lookThroughRangesV3Condition(PP, Cond);
// Separate out all of the terms in a conjunction.
@@ -3070,18 +3117,16 @@ Sema::findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond) {
break;
}
}
-
- if (!FailedCond) {
- if (!AllowTopLevelCond)
- return { nullptr, "" };
-
+ if (!FailedCond)
FailedCond = Cond->IgnoreParenImpCasts();
- }
std::string Description;
{
llvm::raw_string_ostream Out(Description);
- FailedCond->printPretty(Out, nullptr, getPrintingPolicy());
+ PrintingPolicy Policy = getPrintingPolicy();
+ Policy.PrintCanonicalTypes = true;
+ FailedBooleanConditionPrinterHelper Helper(Policy);
+ FailedCond->printPretty(Out, &Helper, Policy, 0, "\n", nullptr);
}
return { FailedCond, Description };
}
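
The printer helper above mostly affects how a failed enable_if-style boolean condition is echoed back in notes; a hypothetical example (mylib, Small and take are invented names, and the diagnostic wording shown is only indicative):

    #include <type_traits>

    namespace mylib {
    template <typename T> constexpr bool Small = sizeof(T) <= 4;
    }

    template <typename T, typename = std::enable_if_t<mylib::Small<T>>>
    void take(T) {}

    // take(3.14);  // the "because ... evaluated to false" note can now spell
    //              // the failed term as 'mylib::Small<double>' (qualifier kept,
    //              // template argument substituted) rather than the dependent
    //              // 'Small<T>' spelling from the declaration.
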
@@ -3165,9 +3210,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
- findFailedBooleanCondition(
- TemplateArgs[0].getSourceExpression(),
- /*AllowTopLevelCond=*/true);
+ findFailedBooleanCondition(TemplateArgs[0].getSourceExpression());
// Remove the old SFINAE diagnostic.
PartialDiagnosticAt OldDiag =
@@ -3247,13 +3290,11 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// This is the first time we have referenced this class template
// specialization. Create the canonical declaration and add it to
// the set of specializations.
- Decl = ClassTemplateSpecializationDecl::Create(Context,
- ClassTemplate->getTemplatedDecl()->getTagKind(),
- ClassTemplate->getDeclContext(),
- ClassTemplate->getTemplatedDecl()->getLocStart(),
- ClassTemplate->getLocation(),
- ClassTemplate,
- Converted, nullptr);
+ Decl = ClassTemplateSpecializationDecl::Create(
+ Context, ClassTemplate->getTemplatedDecl()->getTagKind(),
+ ClassTemplate->getDeclContext(),
+ ClassTemplate->getTemplatedDecl()->getBeginLoc(),
+ ClassTemplate->getLocation(), ClassTemplate, Converted, nullptr);
ClassTemplate->AddSpecialization(Decl, InsertPos);
if (ClassTemplate->isOutOfLine())
Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
@@ -4218,12 +4259,12 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
// a "not a template" case. FIXME: Refactor isTemplateName so we don't
// need to do this.
DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
- LookupResult R(*this, DNI.getName(), Name.getLocStart(),
+ LookupResult R(*this, DNI.getName(), Name.getBeginLoc(),
LookupOrdinaryName);
bool MOUS;
if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
MOUS, TemplateKWLoc))
- Diag(Name.getLocStart(), diag::err_no_member)
+ Diag(Name.getBeginLoc(), diag::err_no_member)
<< DNI.getName() << LookupCtx << SS.getRange();
return TNK_Non_template;
} else {
@@ -4241,10 +4282,11 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
// We don't get here if naming the constructor would be valid, so we
// just reject immediately and recover by treating the
// injected-class-name as naming the template.
- Diag(Name.getLocStart(),
+ Diag(Name.getBeginLoc(),
diag::ext_out_of_line_qualified_id_type_names_constructor)
- << Name.Identifier << 0 /*injected-class-name used as template name*/
- << 1 /*'template' keyword was used*/;
+ << Name.Identifier
+ << 0 /*injected-class-name used as template name*/
+ << 1 /*'template' keyword was used*/;
}
return TNK;
}
@@ -4270,11 +4312,9 @@ TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
break;
}
- Diag(Name.getLocStart(),
- diag::err_template_kw_refers_to_non_template)
- << GetNameFromUnqualifiedId(Name).getName()
- << Name.getSourceRange()
- << TemplateKWLoc;
+ Diag(Name.getBeginLoc(), diag::err_template_kw_refers_to_non_template)
+ << GetNameFromUnqualifiedId(Name).getName() << Name.getSourceRange()
+ << TemplateKWLoc;
return TNK_Non_template;
}
@@ -4425,7 +4465,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
- if (ArgType->getType()->isDependentType()) {
+ if (ArgType->getType()->isInstantiationDependentType()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
Param, Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
@@ -5629,8 +5669,8 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
- << ParamType << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
- << FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getLocEnd()),
+ << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), Code)
+ << FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getEndLoc()),
")");
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_NullPointer;
@@ -5670,9 +5710,9 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
unsigned ArgQuals = ArgType.getCVRQualifiers();
if ((ParamQuals | ArgQuals) != ParamQuals) {
- S.Diag(Arg->getLocStart(),
+ S.Diag(Arg->getBeginLoc(),
diag::err_template_arg_ref_bind_ignores_quals)
- << ParamType << Arg->getType() << Arg->getSourceRange();
+ << ParamType << Arg->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
@@ -5686,11 +5726,11 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
ParamType.getNonReferenceType())) {
// We can't perform this conversion or binding.
if (ParamType->isReferenceType())
- S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
- << ParamType << ArgIn->getType() << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_no_ref_bind)
+ << ParamType << ArgIn->getType() << Arg->getSourceRange();
else
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
- << ArgIn->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_convertible)
+ << ArgIn->getType() << ParamType << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
@@ -5736,8 +5776,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
}
if (FirstOpLoc.isValid()) {
if (ExtWarnMSTemplateArg)
- S.Diag(ArgIn->getLocStart(), diag::ext_ms_deref_template_argument)
- << ArgIn->getSourceRange();
+ S.Diag(ArgIn->getBeginLoc(), diag::ext_ms_deref_template_argument)
+ << ArgIn->getSourceRange();
if (FirstOpKind == UO_AddrOf)
AddressTaken = true;
@@ -5745,8 +5785,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// We cannot let pointers get dereferenced here, that is obviously not a
// constant expression.
assert(FirstOpKind == UO_Deref);
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
}
}
} else {
@@ -5770,7 +5810,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
if (!Invalid && !ExtraParens) {
- S.Diag(Arg->getLocStart(),
+ S.Diag(Arg->getBeginLoc(),
S.getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_template_arg_extra_parens
: diag::ext_template_arg_extra_parens)
@@ -5836,16 +5876,16 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
}
if (!DRE) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// Cannot refer to non-static data members
if (isa<FieldDecl>(Entity) || isa<IndirectFieldDecl>(Entity)) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
- << Entity << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_field)
+ << Entity << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
@@ -5853,8 +5893,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// Cannot refer to non-static member functions
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
if (!Method->isStatic()) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
- << Method << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_method)
+ << Method << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
@@ -5866,23 +5906,24 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// A non-type template argument must refer to an object or function.
if (!Func && !Var) {
// We found something, but we don't know specifically what it is.
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_object_or_func)
+ << Arg->getSourceRange();
S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
return true;
}
// Address / reference template args must have external linkage in C++98.
if (Entity->getFormalLinkage() == InternalLinkage) {
- S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_template_arg_object_internal :
- diag::ext_template_arg_object_internal)
- << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(),
+ S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_template_arg_object_internal
+ : diag::ext_template_arg_object_internal)
+ << !Func << Entity << Arg->getSourceRange();
S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
<< !Func;
} else if (!Entity->hasLinkage()) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
- << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_object_no_linkage)
+ << !Func << Entity << Arg->getSourceRange();
S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
<< !Func;
return true;
@@ -5914,17 +5955,16 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
} else {
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
- S.Diag(Arg->getLocStart(),
- diag::err_template_arg_reference_var)
- << Var->getType() << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_reference_var)
+ << Var->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// A template argument must have static storage duration.
if (Var->getTLSKind()) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_thread_local)
+ << Arg->getSourceRange();
S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
return true;
}
@@ -5961,15 +6001,14 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// taking the address of the entity.
ArgType = S.Context.getPointerType(Var->getType());
if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
- << ParamType;
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
+ << ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
- S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
- << ParamType
- << FixItHint::CreateInsertion(Arg->getLocStart(), "&");
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
+ << ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
S.Diag(Param->getLocation(), diag::note_template_param_here);
}
@@ -5983,7 +6022,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// Create the template argument.
Converted =
TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
- S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity, false);
+ S.MarkAnyDeclReferenced(Arg->getBeginLoc(), Entity, false);
return false;
}
@@ -6012,11 +6051,11 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
if (!Invalid && !ExtraParens) {
- S.Diag(Arg->getLocStart(),
- S.getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_template_arg_extra_parens :
- diag::ext_template_arg_extra_parens)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(),
+ S.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_template_arg_extra_parens
+ : diag::ext_template_arg_extra_parens)
+ << Arg->getSourceRange();
ExtraParens = true;
}
@@ -6078,16 +6117,16 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
} else if (!S.Context.hasSameUnqualifiedType(
ResultArg->getType(), ParamType.getNonReferenceType())) {
// We can't perform this conversion.
- S.Diag(ResultArg->getLocStart(), diag::err_template_arg_not_convertible)
+ S.Diag(ResultArg->getBeginLoc(), diag::err_template_arg_not_convertible)
<< ResultArg->getType() << ParamType << ResultArg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
if (!DRE)
- return S.Diag(Arg->getLocStart(),
+ return S.Diag(Arg->getBeginLoc(),
diag::err_template_arg_not_pointer_to_member_form)
- << Arg->getSourceRange();
+ << Arg->getSourceRange();
if (isa<FieldDecl>(DRE->getDecl()) ||
isa<IndirectFieldDecl>(DRE->getDecl()) ||
@@ -6109,9 +6148,8 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
}
// We found something else, but we don't know specifically what it is.
- S.Diag(Arg->getLocStart(),
- diag::err_template_arg_not_pointer_to_member_form)
- << Arg->getSourceRange();
+ S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
return true;
}
@@ -6127,7 +6165,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK) {
- SourceLocation StartLoc = Arg->getLocStart();
+ SourceLocation StartLoc = Arg->getBeginLoc();
// If the parameter type somehow involves auto, deduce the type now.
if (getLangOpts().CPlusPlus17 && ParamType->isUndeducedType()) {
@@ -6248,7 +6286,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// FIXME: We need TemplateArgument representation and mangling for these.
if (!Value.getMemberPointerPath().empty()) {
- Diag(Arg->getLocStart(),
+ Diag(Arg->getBeginLoc(),
diag::err_template_arg_member_ptr_base_derived_not_supported)
<< Value.getMemberPointerDecl() << ParamType
<< Arg->getSourceRange();
@@ -6274,8 +6312,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Converted = TemplateArgument(ArgResult.get());
break;
}
- Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
- << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
return ExprError();
}
auto *VD = const_cast<ValueDecl *>(
@@ -6384,9 +6422,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- the name of a non-type template-parameter; or
llvm::APSInt Value;
if (!ArgType->isIntegralOrEnumerationType()) {
- Diag(Arg->getLocStart(),
- diag::err_template_arg_not_integral_or_enumeral)
- << ArgType << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::err_template_arg_not_integral_or_enumeral)
+ << ArgType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
} else if (!Arg->isValueDependent()) {
@@ -6424,9 +6461,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).get();
} else {
// We can't perform this conversion.
- Diag(Arg->getLocStart(),
- diag::err_template_arg_not_convertible)
- << Arg->getType() << ParamType << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
@@ -6465,9 +6501,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Complain if an unsigned parameter received a negative value.
if (IntegerType->isUnsignedIntegerOrEnumerationType()
&& (OldValue.isSigned() && OldValue.isNegative())) {
- Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
- << OldValue.toString(10) << Value.toString(10) << Param->getType()
- << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::warn_template_arg_negative)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
@@ -6480,10 +6516,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
else
RequiredBits = OldValue.getMinSignedBits();
if (RequiredBits > AllowedBits) {
- Diag(Arg->getLocStart(),
- diag::warn_template_arg_too_large)
- << OldValue.toString(10) << Value.toString(10) << Param->getType()
- << Arg->getSourceRange();
+ Diag(Arg->getBeginLoc(), diag::warn_template_arg_too_large)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
}
@@ -6526,7 +6561,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
true,
FoundResult)) {
- if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ if (DiagnoseUseOfDecl(Fn, Arg->getBeginLoc()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
@@ -6579,7 +6614,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
ParamRefType->getPointeeType(),
true,
FoundResult)) {
- if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ if (DiagnoseUseOfDecl(Fn, Arg->getBeginLoc()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
@@ -7357,9 +7392,9 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
ParamUseRange = findTemplateParameter(
Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
if (ParamUseRange.isValid()) {
- S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
+ S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getBeginLoc(),
diag::err_dependent_typed_non_type_arg_in_partial_spec)
- << Param->getType();
+ << Param->getType();
S.Diag(Param->getLocation(), diag::note_template_param_here)
<< (IsDefaultArgument ? ParamUseRange : SourceRange())
<< ParamUseRange;
@@ -7616,7 +7651,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
TemplateArgs,
CanonType,
PrevPartial);
- SetNestedNameSpecifier(Partial, SS);
+ SetNestedNameSpecifier(*this, Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
Partial->setTemplateParameterListsInfo(
Context, TemplateParameterLists.drop_back(1));
@@ -7642,7 +7677,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
ClassTemplate,
Converted,
PrevDecl);
- SetNestedNameSpecifier(Specialization, SS);
+ SetNestedNameSpecifier(*this, Specialization, SS);
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
TemplateParameterLists);
@@ -7699,9 +7734,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
NamedDecl *Hidden = nullptr;
if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
SkipBody->ShouldSkip = true;
+ SkipBody->Previous = Def;
makeMergedDefinitionVisible(Hidden);
- // From here on out, treat this as just a redeclaration.
- TUK = TUK_Declaration;
} else if (Def) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_redefinition) << Specialization << Range;
@@ -7715,7 +7749,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition) {
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(Specialization);
AddMsStructLayoutForRecord(Specialization);
}
@@ -7751,7 +7785,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->setLexicalDeclContext(CurContext);
// We may be starting the definition of this specialization.
- if (TUK == TUK_Definition)
+ if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
Specialization->startDefinition();
if (TUK == TUK_Friend) {
@@ -7767,6 +7801,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// context. However, specializations are not found by name lookup.
CurContext->addDecl(Specialization);
}
+
+ if (SkipBody && SkipBody->ShouldSkip)
+ return SkipBody->Previous;
+
return Specialization;
}
@@ -7928,6 +7966,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
HasNoEffect = true;
return false;
}
+ llvm_unreachable("Unexpected TemplateSpecializationKind!");
case TSK_ExplicitInstantiationDefinition:
switch (PrevTSK) {
@@ -8065,9 +8104,13 @@ Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
///
/// \param Previous the set of declarations that may be specialized by
/// this function specialization.
+///
+/// \param QualifiedFriend whether this is a lookup for a qualified friend
+/// declaration with no explicit template argument list that might be
+/// befriending a function template specialization.
bool Sema::CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
- LookupResult &Previous) {
+ LookupResult &Previous, bool QualifiedFriend) {
// The set of function template specializations that could match this
// explicit function template specialization.
UnresolvedSet<8> Candidates;
@@ -8100,7 +8143,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
- EPI.TypeQuals |= Qualifiers::Const;
+ EPI.TypeQuals.addConst();
FT = Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI);
}
@@ -8154,10 +8197,25 @@ bool Sema::CheckFunctionTemplateSpecialization(
}
}
+ // For a qualified friend declaration (with no explicit marker to indicate
+ // that a template specialization was intended), note all (template and
+ // non-template) candidates.
+ if (QualifiedFriend && Candidates.empty()) {
+ Diag(FD->getLocation(), diag::err_qualified_friend_no_match)
+ << FD->getDeclName() << FDLookupContext;
+ // FIXME: We should form a single candidate list and diagnose all
+ // candidates at once, to get proper sorting and limiting.
+ for (auto *OldND : Previous) {
+ if (auto *OldFD = dyn_cast<FunctionDecl>(OldND->getUnderlyingDecl()))
+ NoteOverloadCandidate(OldND, OldFD, FD->getType(), false);
+ }
+ FailedCandidates.NoteCandidates(*this, FD->getLocation());
+ return true;
+ }
+
// Find the most specialized function template.
UnresolvedSetIterator Result = getMostSpecialized(
- Candidates.begin(), Candidates.end(), FailedCandidates,
- FD->getLocation(),
+ Candidates.begin(), Candidates.end(), FailedCandidates, FD->getLocation(),
PDiag(diag::err_function_template_spec_no_match) << FD->getDeclName(),
PDiag(diag::err_function_template_spec_ambiguous)
<< FD->getDeclName() << (ExplicitTemplateArgs != nullptr),
@@ -8304,6 +8362,8 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
QualType Adjusted = Function->getType();
if (!hasExplicitCallingConv(Adjusted))
Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+ // This doesn't handle deduced return types, but both function
+ // declarations should be undeduced at this point.
if (Context.hasSameType(Adjusted, Method->getType())) {
FoundInstantiation = *I;
Instantiation = Method;
@@ -8573,7 +8633,7 @@ static void dllExportImportClassTemplateSpecialization(
for (auto &B : Def->bases()) {
if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
B.getType()->getAsCXXRecordDecl()))
- S.propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart());
+ S.propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getBeginLoc());
}
S.referenceDLLExportedClassMethods();
@@ -8736,7 +8796,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
ClassTemplate,
Converted,
PrevDecl);
- SetNestedNameSpecifier(Specialization, SS);
+ SetNestedNameSpecifier(*this, Specialization, SS);
if (!HasNoEffect && !PrevDecl) {
// Insert the new specialization.
@@ -8990,10 +9050,9 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
DeclarationName Name = NameInfo.getName();
if (!Name) {
if (!D.isInvalidType())
- Diag(D.getDeclSpec().getLocStart(),
+ Diag(D.getDeclSpec().getBeginLoc(),
diag::err_explicit_instantiation_requires_name)
- << D.getDeclSpec().getSourceRange()
- << D.getSourceRange();
+ << D.getDeclSpec().getSourceRange() << D.getSourceRange();
return true;
}
@@ -9046,8 +9105,8 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// A deduction guide is not on the list of entities that can be explicitly
// instantiated.
if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
- Diag(D.getDeclSpec().getLocStart(), diag::err_deduction_guide_specialized)
- << /*explicit instantiation*/ 0;
+ Diag(D.getDeclSpec().getBeginLoc(), diag::err_deduction_guide_specialized)
+ << /*explicit instantiation*/ 0;
return true;
}
@@ -9105,7 +9164,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
//
// This includes auto-typed variable template instantiations.
if (R->isUndeducedType()) {
- Diag(T->getTypeLoc().getLocStart(),
+ Diag(T->getTypeLoc().getBeginLoc(),
diag::err_auto_not_allowed_var_inst);
return true;
}
@@ -9165,17 +9224,15 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
if (!HasNoEffect) {
// Instantiate static data member or variable template.
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
- if (PrevTemplate) {
- // Merge attributes.
- ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
- }
+ // Merge attributes.
+ ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
}
// Check the new variable specialization against the parsed input.
if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) {
- Diag(T->getTypeLoc().getLocStart(),
+ Diag(T->getTypeLoc().getBeginLoc(),
diag::err_invalid_var_template_spec_type)
<< 0 << PrevTemplate << R << Prev->getType();
Diag(PrevTemplate->getLocation(), diag::note_template_declared_here)
@@ -9293,7 +9350,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
PDiag(DiagID) << Specialization->getType(),
PDiag(diag::note_explicit_instantiation_here),
Specialization->getType()->getAs<FunctionProtoType>(),
- Specialization->getLocation(), FPT, D.getLocStart());
+ Specialization->getLocation(), FPT, D.getBeginLoc());
// In Microsoft mode, mismatching exception specifications just cause a
// warning.
if (!getLangOpts().MicrosoftExt && Result)
@@ -9621,7 +9678,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
- findFailedBooleanCondition(Cond, /*AllowTopLevelCond=*/true);
+ findFailedBooleanCondition(Cond);
Diag(FailedCond->getExprLoc(),
diag::err_typename_nested_not_found_requirement)
@@ -9866,6 +9923,15 @@ bool Sema::RebuildTemplateParamsInCurrentInstantiation(
if (!NewTSI)
return true;
+ if (NewTSI->getType()->isUndeducedType()) {
+ // C++17 [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains
+ // - an identifier associated by name lookup with a non-type
+ // template-parameter declared with a type that contains a
+ // placeholder type (7.1.7.4),
+ NewTSI = SubstAutoTypeSourceInfo(NewTSI, Context.DependentTy);
+ }
+
if (NewTSI != NTTP->getTypeSourceInfo()) {
NTTP->setTypeSourceInfo(NewTSI);
NTTP->setType(NewTSI->getType());
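
The new SubstAutoTypeSourceInfo call above targets template parameters whose type still contains a placeholder when an out-of-line definition is rebuilt in the current instantiation; a small illustration (all names invented):

    template <typename T> struct Outer {
      template <auto N> struct Inner;   // non-type parameter with placeholder type
    };

    // Out-of-line definition: the inner parameter list is rebuilt in the context
    // of Outer<T>, and the still-undeduced 'auto' is treated as dependent while
    // doing so instead of tripping over an undeduced type.
    template <typename T>
    template <auto N>
    struct Outer<T>::Inner {};
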
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index df46d6115a20..f2f989ce1241 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -178,6 +178,8 @@ getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
while (true) {
if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
E = IC->getSubExpr();
+ else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
+ E = CE->getSubExpr();
else if (SubstNonTypeTemplateParmExpr *Subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(E))
E = Subst->getReplacement();
@@ -3076,7 +3078,7 @@ Sema::SubstituteExplicitTemplateArguments(
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
// and the end of the function-definition, member-declarator, or
// declarator.
- unsigned ThisTypeQuals = 0;
+ Qualifiers ThisTypeQuals;
CXXRecordDecl *ThisContext = nullptr;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
ThisContext = Method->getParent();
@@ -4423,11 +4425,15 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
if (const AutoType *AT = Type.getType()->getAs<AutoType>()) {
if (AT->isDecltypeAuto()) {
if (isa<InitListExpr>(Init)) {
- Diag(Init->getLocStart(), diag::err_decltype_auto_initializer_list);
+ Diag(Init->getBeginLoc(), diag::err_decltype_auto_initializer_list);
return DAR_FailedAlreadyDiagnosed;
}
- QualType Deduced = BuildDecltypeType(Init, Init->getLocStart(), false);
+ ExprResult ER = CheckPlaceholderExpr(Init);
+ if (ER.isInvalid())
+ return DAR_FailedAlreadyDiagnosed;
+ Init = ER.get();
+ QualType Deduced = BuildDecltypeType(Init, Init->getBeginLoc(), false);
if (Deduced.isNull())
return DAR_FailedAlreadyDiagnosed;
// FIXME: Support a non-canonical deduced type for 'auto'.
@@ -4438,7 +4444,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
return DAR_Succeeded;
} else if (!getLangOpts().CPlusPlus) {
if (isa<InitListExpr>(Init)) {
- Diag(Init->getLocStart(), diag::err_auto_init_list_from_c);
+ Diag(Init->getBeginLoc(), diag::err_auto_init_list_from_c);
return DAR_FailedAlreadyDiagnosed;
}
}
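Minimal sketch of the source-level behaviour exercised here: decltype(auto) deduces from an ordinary initializer after placeholder checking, while a braced-init-list is still rejected.

    int g();
    decltype(auto) a = g();     // OK: deduces int
    // decltype(auto) b = {1};  // error: err_decltype_auto_initializer_list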
@@ -4655,8 +4661,7 @@ AddImplicitObjectParameterType(ASTContext &Context,
// The standard doesn't say explicitly, but we pick the appropriate kind of
// reference type based on [over.match.funcs]p4.
QualType ArgTy = Context.getTypeDeclType(Method->getParent());
- ArgTy = Context.getQualifiedType(ArgTy,
- Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ ArgTy = Context.getQualifiedType(ArgTy, Method->getTypeQualifiers());
if (Method->getRefQualifier() == RQ_RValue)
ArgTy = Context.getRValueReferenceType(ArgTy);
else
@@ -5225,6 +5230,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
while (true) {
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExpr();
+ else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(E))
+ E = CE->getSubExpr();
else if (const SubstNonTypeTemplateParmExpr *Subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(E))
E = Subst->getReplacement();
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 1aa69bd35d67..96abeed82493 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -199,6 +199,7 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case DefaultTemplateArgumentChecking:
case DeclaringSpecialMember:
case DefiningSynthesizedFunction:
+ case ExceptionSpecEvaluation:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -621,6 +622,12 @@ void Sema::PrintInstantiationStack() {
break;
}
+ case CodeSynthesisContext::ExceptionSpecEvaluation:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_evaluating_exception_spec_here)
+ << cast<FunctionDecl>(Active->Entity);
+ break;
+
case CodeSynthesisContext::ExceptionSpecInstantiation:
Diags.Report(Active->PointOfInstantiation,
diag::note_template_exception_spec_instantiation_here)
@@ -668,7 +675,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// context, depending on what else is on the stack.
if (isa<TypeAliasTemplateDecl>(Active->Entity))
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case CodeSynthesisContext::DefaultFunctionArgumentInstantiation:
case CodeSynthesisContext::ExceptionSpecInstantiation:
// This is a template instantiation, so there is no SFINAE.
@@ -695,6 +702,12 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// there is no SFINAE.
return None;
+ case CodeSynthesisContext::ExceptionSpecEvaluation:
+ // FIXME: This should not be treated as a SFINAE context, because
+ // we will cache an incorrect exception specification. However, clang
+ // bootstrap relies on this! See PR31692.
+ break;
+
case CodeSynthesisContext::Memoization:
break;
}
@@ -894,7 +907,7 @@ namespace {
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
@@ -1154,7 +1167,7 @@ TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
- return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentType());
+ return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentKind());
}
ExprResult
@@ -1414,7 +1427,7 @@ template<typename Fn>
QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec) {
// We need a local instantiation scope for this function prototype.
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
@@ -1653,7 +1666,7 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals) {
+ Qualifiers ThisTypeQuals) {
assert(!CodeSynthesisContexts.empty() &&
"Cannot perform an instantiation without some context on the "
"instantiation stack");
@@ -1708,7 +1721,7 @@ void Sema::SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
Proto->getExtProtoInfo().ExceptionSpec;
SmallVector<QualType, 4> ExceptionStorage;
- if (SubstExceptionSpec(New->getTypeSourceInfo()->getTypeLoc().getLocEnd(),
+ if (SubstExceptionSpec(New->getTypeSourceInfo()->getTypeLoc().getEndLoc(),
ESI, ExceptionStorage, Args))
// On error, recover by dropping the exception specification.
ESI.Type = EST_None;
@@ -1789,7 +1802,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
if (NewArg.isUsable()) {
// It would be nice if we still had this.
- SourceLocation EqualLoc = NewArg.get()->getLocStart();
+ SourceLocation EqualLoc = NewArg.get()->getBeginLoc();
SetParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
}
} else {
@@ -2135,7 +2148,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
CXXRecordDecl *ThisContext =
dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
- CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
ND && ND->isCXXInstanceMember());
Attr *NewAttr =
@@ -2303,7 +2316,7 @@ bool Sema::InstantiateInClassInitializer(
Diag(PointOfInstantiation,
diag::err_in_class_initializer_not_yet_parsed)
<< OutermostClass << Pattern;
- Diag(Pattern->getLocEnd(), diag::note_in_class_initializer_not_yet_parsed);
+ Diag(Pattern->getEndLoc(), diag::note_in_class_initializer_not_yet_parsed);
Instantiation->setInvalidDecl();
return true;
}
@@ -2330,14 +2343,14 @@ bool Sema::InstantiateInClassInitializer(
// Instantiate the initializer.
ActOnStartCXXInClassMemberInitializer();
- CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), /*TypeQuals=*/0);
+ CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), Qualifiers());
ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
/*CXXDirectInit=*/false);
Expr *Init = NewInit.get();
assert((!Init || !isa<ParenListExpr>(Init)) && "call-style init in class");
ActOnFinishCXXInClassMemberInitializer(
- Instantiation, Init ? Init->getLocStart() : SourceLocation(), Init);
+ Instantiation, Init ? Init->getBeginLoc() : SourceLocation(), Init);
if (auto *L = getASTMutationListener())
L->DefaultMemberInitializerInstantiated(Instantiation);
@@ -2561,10 +2574,14 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
for (auto *D : Instantiation->decls()) {
bool SuppressNew = false;
if (auto *Function = dyn_cast<FunctionDecl>(D)) {
- if (FunctionDecl *Pattern
- = Function->getInstantiatedFromMemberFunction()) {
- MemberSpecializationInfo *MSInfo
- = Function->getMemberSpecializationInfo();
+ if (FunctionDecl *Pattern =
+ Function->getInstantiatedFromMemberFunction()) {
+
+ if (Function->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
+ MemberSpecializationInfo *MSInfo =
+ Function->getMemberSpecializationInfo();
assert(MSInfo && "No member specialization information?");
if (MSInfo->getTemplateSpecializationKind()
== TSK_ExplicitSpecialization)
@@ -2605,6 +2622,9 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
continue;
if (Var->isStaticDataMember()) {
+ if (Var->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
assert(MSInfo && "No member specialization information?");
if (MSInfo->getTemplateSpecializationKind()
@@ -2636,6 +2656,9 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
}
}
} else if (auto *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (Record->hasAttr<ExcludeFromExplicitInstantiationAttr>())
+ continue;
+
// Always skip the injected-class-name, along with any
// redeclarations of nested classes, since both would cause us
// to try to instantiate the members of a class twice.
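Hedged sketch of the new skip logic, assuming the exclude_from_explicit_instantiation attribute spelling used by libc++: members carrying the attribute are no longer instantiated by an explicit instantiation of the enclosing class.

    template <class T> struct Widget {
      __attribute__((exclude_from_explicit_instantiation)) void helper() {}
      void api() {}
    };
    template struct Widget<int>;  // instantiates api(); helper() is skipped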
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 86492716f685..fad3c065e896 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -295,7 +295,7 @@ static void instantiateOMPDeclareSimdDeclAttr(
PVD, FD->getParamDecl(PVD->getFunctionScopeIndex()));
return S.SubstExpr(E, TemplateArgs);
}
- Sema::CXXThisScopeRAII ThisScope(S, ThisContext, /*TypeQuals=*/0,
+ Sema::CXXThisScopeRAII ThisScope(S, ThisContext, Qualifiers(),
FD->isCXXInstanceMember());
return S.SubstExpr(E, TemplateArgs);
};
@@ -355,7 +355,7 @@ void Sema::InstantiateAttrsForDecl(
// applicable to template declaration, we'll need to add them here.
CXXThisScopeRAII ThisScope(
*this, dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext()),
- /*TypeQuals*/ 0, ND->isCXXInstanceMember());
+ Qualifiers(), ND->isCXXInstanceMember());
Attr *NewAttr = sema::instantiateTemplateAttributeForDecl(
TmplAttr, Context, *this, TemplateArgs);
@@ -365,6 +365,20 @@ void Sema::InstantiateAttrsForDecl(
}
}
+static Sema::RetainOwnershipKind
+attrToRetainOwnershipKind(const Attr *A) {
+ switch (A->getKind()) {
+ case clang::attr::CFConsumed:
+ return Sema::RetainOwnershipKind::CF;
+ case clang::attr::OSConsumed:
+ return Sema::RetainOwnershipKind::OS;
+ case clang::attr::NSConsumed:
+ return Sema::RetainOwnershipKind::NS;
+ default:
+ llvm_unreachable("Wrong argument supplied");
+ }
+}
+
void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Tmpl, Decl *New,
LateInstantiatedAttrVec *LateAttrs,
@@ -438,11 +452,12 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
- if (isa<NSConsumedAttr>(TmplAttr) || isa<CFConsumedAttr>(TmplAttr)) {
- AddNSConsumedAttr(TmplAttr->getRange(), New,
- TmplAttr->getSpellingListIndex(),
- isa<NSConsumedAttr>(TmplAttr),
- /*template instantiation*/ true);
+ if (isa<NSConsumedAttr>(TmplAttr) || isa<OSConsumedAttr>(TmplAttr) ||
+ isa<CFConsumedAttr>(TmplAttr)) {
+ AddXConsumedAttr(New, TmplAttr->getRange(),
+ TmplAttr->getSpellingListIndex(),
+ attrToRetainOwnershipKind(TmplAttr),
+ /*template instantiation=*/true);
continue;
}
@@ -459,7 +474,7 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
NamedDecl *ND = dyn_cast<NamedDecl>(New);
CXXRecordDecl *ThisContext =
dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
- CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
+ CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
ND && ND->isCXXInstanceMember());
Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
@@ -562,7 +577,7 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
RD->getEnclosingNamespaceContext() == SemaRef.getStdNamespace() &&
RD->getIdentifier() && RD->getIdentifier()->isStr("common_type") &&
D->getIdentifier() && D->getIdentifier()->isStr("type") &&
- SemaRef.getSourceManager().isInSystemHeader(D->getLocStart()))
+ SemaRef.getSourceManager().isInSystemHeader(D->getBeginLoc()))
// Fold it to the (non-reference) type which g++ would have produced.
DI = SemaRef.Context.getTrivialTypeSourceInfo(
DI->getType().getNonReferenceType());
@@ -570,10 +585,10 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
// Create the new typedef
TypedefNameDecl *Typedef;
if (IsTypeAlias)
- Typedef = TypeAliasDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ Typedef = TypeAliasDecl::Create(SemaRef.Context, Owner, D->getBeginLoc(),
D->getLocation(), D->getIdentifier(), DI);
else
- Typedef = TypedefDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ Typedef = TypedefDecl::Create(SemaRef.Context, Owner, D->getBeginLoc(),
D->getLocation(), D->getIdentifier(), DI);
if (Invalid)
Typedef->setInvalidDecl();
@@ -748,6 +763,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
Var->setImplicit(D->isImplicit());
+ if (Var->isStaticLocal())
+ SemaRef.CheckStaticLocalForDllExport(Var);
+
return Var;
}
@@ -872,7 +890,7 @@ Decl *TemplateDeclInstantiator::VisitMSPropertyDecl(MSPropertyDecl *D) {
MSPropertyDecl *Property = MSPropertyDecl::Create(
SemaRef.Context, Owner, D->getLocation(), D->getDeclName(), DI->getType(),
- DI, D->getLocStart(), D->getGetterId(), D->getSetterId());
+ DI, D->getBeginLoc(), D->getGetterId(), D->getSetterId());
SemaRef.InstantiateAttrs(TemplateArgs, D, Property, LateAttrs,
StartingScope);
@@ -932,7 +950,7 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
if (!InstTy)
return nullptr;
- FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocStart(),
+ FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getBeginLoc(),
D->getFriendLoc(), InstTy);
if (!FD)
return nullptr;
@@ -991,10 +1009,10 @@ Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
PrevDecl = cast<EnumDecl>(Prev);
}
- EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
- D->getLocation(), D->getIdentifier(),
- PrevDecl, D->isScoped(),
- D->isScopedUsingClassTag(), D->isFixed());
+ EnumDecl *Enum =
+ EnumDecl::Create(SemaRef.Context, Owner, D->getBeginLoc(),
+ D->getLocation(), D->getIdentifier(), PrevDecl,
+ D->isScoped(), D->isScopedUsingClassTag(), D->isFixed());
if (D->isFixed()) {
if (TypeSourceInfo *TI = D->getIntegerTypeSourceInfo()) {
// If we have type source information for the underlying type, it means it
@@ -1228,7 +1246,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
TemplateParameterList *PrevParams
- = PrevClassTemplate->getTemplateParameters();
+ = PrevClassTemplate->getMostRecentDecl()->getTemplateParameters();
// Make sure the parameter lists match.
if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams,
@@ -1250,15 +1268,17 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
}
}
- CXXRecordDecl *RecordInst
- = CXXRecordDecl::Create(SemaRef.Context, Pattern->getTagKind(), DC,
- Pattern->getLocStart(), Pattern->getLocation(),
- Pattern->getIdentifier(), PrevDecl,
- /*DelayTypeCreation=*/true);
+ CXXRecordDecl *RecordInst = CXXRecordDecl::Create(
+ SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getBeginLoc(),
+ Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl,
+ /*DelayTypeCreation=*/true);
if (QualifierLoc)
RecordInst->setQualifierInfo(QualifierLoc);
+ SemaRef.InstantiateAttrsForDecl(TemplateArgs, Pattern, RecordInst, LateAttrs,
+ StartingScope);
+
ClassTemplateDecl *Inst
= ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(),
D->getIdentifier(), InstParams, RecordInst);
@@ -1484,15 +1504,17 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
PrevDecl = cast<CXXRecordDecl>(Prev);
}
- CXXRecordDecl *Record
- = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
- D->getLocStart(), D->getLocation(),
- D->getIdentifier(), PrevDecl);
+ CXXRecordDecl *Record = CXXRecordDecl::Create(
+ SemaRef.Context, D->getTagKind(), Owner, D->getBeginLoc(),
+ D->getLocation(), D->getIdentifier(), PrevDecl);
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Record))
return nullptr;
+ SemaRef.InstantiateAttrsForDecl(TemplateArgs, D, Record, LateAttrs,
+ StartingScope);
+
Record->setImplicit(D->isImplicit());
// FIXME: Check against AS_none is an ugly hack to work around the issue that
// the tag decls introduced by friend class declarations don't have an access
@@ -1725,10 +1747,13 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
}
+ if (isFriend)
+ Function->setObjectOfFriendDecl();
+
if (InitFunctionInstantiation(Function, D))
Function->setInvalidDecl();
- bool isExplicitSpecialization = false;
+ bool IsExplicitSpecialization = false;
LookupResult Previous(
SemaRef, Function->getDeclName(), SourceLocation(),
@@ -1741,9 +1766,6 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
= D->getDependentSpecializationInfo()) {
assert(isFriend && "non-friend has dependent specialization info?");
- // This needs to be set now for future sanity.
- Function->setObjectOfFriendDecl();
-
// Instantiate the explicit template arguments.
TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
Info->getRAngleLoc());
@@ -1766,8 +1788,25 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Previous))
Function->setInvalidDecl();
- isExplicitSpecialization = true;
+ IsExplicitSpecialization = true;
+ } else if (const ASTTemplateArgumentListInfo *Info =
+ D->getTemplateSpecializationArgsAsWritten()) {
+ // The name of this function was written as a template-id.
+ SemaRef.LookupQualifiedName(Previous, DC);
+
+ // Instantiate the explicit template arguments.
+ TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
+ Info->getRAngleLoc());
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return nullptr;
+
+ if (SemaRef.CheckFunctionTemplateSpecialization(Function,
+ &ExplicitArgs,
+ Previous))
+ Function->setInvalidDecl();
+ IsExplicitSpecialization = true;
} else if (TemplateParams || !FunctionTemplate) {
// Look only into the namespace where the friend would be declared to
// find a previous declaration. This is the innermost enclosing namespace,
@@ -1782,11 +1821,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Previous.clear();
}
- if (isFriend)
- Function->setObjectOfFriendDecl();
-
SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
- isExplicitSpecialization);
+ IsExplicitSpecialization);
NamedDecl *PrincipalDecl = (TemplateParams
? cast<NamedDecl>(FunctionTemplate)
@@ -1795,7 +1831,9 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
// If the original function was part of a friend declaration,
// inherit its namespace state and add it to the owner.
if (isFriend) {
- PrincipalDecl->setObjectOfFriendDecl();
+ Function->setObjectOfFriendDecl();
+ if (FunctionTemplateDecl *FT = Function->getDescribedFunctionTemplate())
+ FT->setObjectOfFriendDecl();
DC->makeDeclVisibleInContext(PrincipalDecl);
bool QueuedInstantiation = false;
@@ -1945,26 +1983,23 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
Constructor->isExplicit(),
Constructor->isInlineSpecified(),
false, Constructor->isConstexpr());
- Method->setRangeEnd(Constructor->getLocEnd());
+ Method->setRangeEnd(Constructor->getEndLoc());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
StartLoc, NameInfo, T, TInfo,
Destructor->isInlineSpecified(),
false);
- Method->setRangeEnd(Destructor->getLocEnd());
+ Method->setRangeEnd(Destructor->getEndLoc());
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
- Method = CXXConversionDecl::Create(SemaRef.Context, Record,
- StartLoc, NameInfo, T, TInfo,
- Conversion->isInlineSpecified(),
- Conversion->isExplicit(),
- Conversion->isConstexpr(),
- Conversion->getLocEnd());
+ Method = CXXConversionDecl::Create(
+ SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
+ Conversion->isInlineSpecified(), Conversion->isExplicit(),
+ Conversion->isConstexpr(), Conversion->getEndLoc());
} else {
StorageClass SC = D->isStatic() ? SC_Static : SC_None;
- Method = CXXMethodDecl::Create(SemaRef.Context, Record,
- StartLoc, NameInfo, T, TInfo,
- SC, D->isInlineSpecified(),
- D->isConstexpr(), D->getLocEnd());
+ Method = CXXMethodDecl::Create(SemaRef.Context, Record, StartLoc, NameInfo,
+ T, TInfo, SC, D->isInlineSpecified(),
+ D->isConstexpr(), D->getEndLoc());
}
if (D->isInlined())
@@ -2034,7 +2069,54 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
Sema::ForExternalRedeclaration);
- if (!FunctionTemplate || TemplateParams || isFriend) {
+ bool IsExplicitSpecialization = false;
+
+ // If the name of this function was written as a template-id, instantiate
+ // the explicit template arguments.
+ if (DependentFunctionTemplateSpecializationInfo *Info
+ = D->getDependentSpecializationInfo()) {
+ assert(isFriend && "non-friend has dependent specialization info?");
+
+ // Instantiate the explicit template arguments.
+ TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
+ Info->getRAngleLoc());
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return nullptr;
+
+ // Map the candidate templates to their instantiations.
+ for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
+ Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ Info->getTemplate(I),
+ TemplateArgs);
+ if (!Temp) return nullptr;
+
+ Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
+ }
+
+ if (SemaRef.CheckFunctionTemplateSpecialization(Method,
+ &ExplicitArgs,
+ Previous))
+ Method->setInvalidDecl();
+
+ IsExplicitSpecialization = true;
+ } else if (const ASTTemplateArgumentListInfo *Info =
+ D->getTemplateSpecializationArgsAsWritten()) {
+ SemaRef.LookupQualifiedName(Previous, DC);
+
+ TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
+ Info->getRAngleLoc());
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return nullptr;
+
+ if (SemaRef.CheckFunctionTemplateSpecialization(Method,
+ &ExplicitArgs,
+ Previous))
+ Method->setInvalidDecl();
+
+ IsExplicitSpecialization = true;
+ } else if (!FunctionTemplate || TemplateParams || isFriend) {
SemaRef.LookupQualifiedName(Previous, Record);
// In C++, the previous declaration we find might be a tag type
@@ -2046,7 +2128,8 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
}
if (!IsClassScopeSpecialization)
- SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous, false);
+ SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous,
+ IsExplicitSpecialization);
if (D->isPure())
SemaRef.CheckPureMethod(Method, SourceRange());
@@ -2119,7 +2202,7 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
assert(D->getTypeForDecl()->isTemplateTypeParmType());
TemplateTypeParmDecl *Inst = TemplateTypeParmDecl::Create(
- SemaRef.Context, Owner, D->getLocStart(), D->getLocation(),
+ SemaRef.Context, Owner, D->getBeginLoc(), D->getLocation(),
D->getDepth() - TemplateArgs.getNumSubstitutedLevels(), D->getIndex(),
D->getIdentifier(), D->wasDeclaredWithTypename(), D->isParameterPack());
Inst->setAccess(AS_public);
@@ -2683,26 +2766,28 @@ Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
}
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *Decl) {
+ ClassScopeFunctionSpecializationDecl *Decl) {
CXXMethodDecl *OldFD = Decl->getSpecialization();
CXXMethodDecl *NewFD =
cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
if (!NewFD)
return nullptr;
- LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
- Sema::ForExternalRedeclaration);
-
- TemplateArgumentListInfo TemplateArgs;
- TemplateArgumentListInfo *TemplateArgsPtr = nullptr;
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ TemplateArgumentListInfo *ExplicitTemplateArgsPtr = nullptr;
if (Decl->hasExplicitTemplateArgs()) {
- TemplateArgs = Decl->templateArgs();
- TemplateArgsPtr = &TemplateArgs;
+ if (SemaRef.Subst(Decl->templateArgs().getArgumentArray(),
+ Decl->templateArgs().size(), ExplicitTemplateArgs,
+ TemplateArgs))
+ return nullptr;
+ ExplicitTemplateArgsPtr = &ExplicitTemplateArgs;
}
+ LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
+ Sema::ForExternalRedeclaration);
SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
- if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, TemplateArgsPtr,
- Previous)) {
+ if (SemaRef.CheckFunctionTemplateSpecialization(
+ NewFD, ExplicitTemplateArgsPtr, Previous)) {
NewFD->setInvalidDecl();
return NewFD;
}
@@ -2735,13 +2820,27 @@ Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
return TD;
}
+Decl *TemplateDeclInstantiator::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
+ llvm_unreachable(
+ "Requires directive cannot be instantiated within a dependent context");
+}
+
Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
OMPDeclareReductionDecl *D) {
// Instantiate type and check if it is allowed.
- QualType SubstReductionType = SemaRef.ActOnOpenMPDeclareReductionType(
- D->getLocation(),
- ParsedType::make(SemaRef.SubstType(D->getType(), TemplateArgs,
- D->getLocation(), DeclarationName())));
+ const bool RequiresInstantiation =
+ D->getType()->isDependentType() ||
+ D->getType()->isInstantiationDependentType() ||
+ D->getType()->containsUnexpandedParameterPack();
+ QualType SubstReductionType;
+ if (RequiresInstantiation) {
+ SubstReductionType = SemaRef.ActOnOpenMPDeclareReductionType(
+ D->getLocation(),
+ ParsedType::make(SemaRef.SubstType(
+ D->getType(), TemplateArgs, D->getLocation(), DeclarationName())));
+ } else {
+ SubstReductionType = D->getType();
+ }
if (SubstReductionType.isNull())
return nullptr;
bool IsCorrect = !SubstReductionType.isNull();
@@ -2758,25 +2857,35 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
/*S=*/nullptr, Owner, D->getDeclName(), ReductionTypes, D->getAccess(),
PrevDeclInScope);
auto *NewDRD = cast<OMPDeclareReductionDecl>(DRD.get().getSingleDecl());
- if (isDeclWithinFunction(NewDRD))
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewDRD);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewDRD);
+ if (!RequiresInstantiation) {
+ if (Expr *Combiner = D->getCombiner()) {
+ NewDRD->setCombinerData(D->getCombinerIn(), D->getCombinerOut());
+ NewDRD->setCombiner(Combiner);
+ if (Expr *Init = D->getInitializer()) {
+ NewDRD->setInitializerData(D->getInitOrig(), D->getInitPriv());
+ NewDRD->setInitializer(Init, D->getInitializerKind());
+ }
+ }
+ (void)SemaRef.ActOnOpenMPDeclareReductionDirectiveEnd(
+ /*S=*/nullptr, DRD, IsCorrect && !D->isInvalidDecl());
+ return NewDRD;
+ }
Expr *SubstCombiner = nullptr;
Expr *SubstInitializer = nullptr;
// Combiners instantiation sequence.
if (D->getCombiner()) {
SemaRef.ActOnOpenMPDeclareReductionCombinerStart(
/*S=*/nullptr, NewDRD);
- const char *Names[] = {"omp_in", "omp_out"};
- for (auto &Name : Names) {
- DeclarationName DN(&SemaRef.Context.Idents.get(Name));
- auto OldLookup = D->lookup(DN);
- auto Lookup = NewDRD->lookup(DN);
- if (!OldLookup.empty() && !Lookup.empty()) {
- assert(Lookup.size() == 1 && OldLookup.size() == 1);
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldLookup.front(),
- Lookup.front());
- }
- }
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getCombinerIn())->getDecl(),
+ cast<DeclRefExpr>(NewDRD->getCombinerIn())->getDecl());
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getCombinerOut())->getDecl(),
+ cast<DeclRefExpr>(NewDRD->getCombinerOut())->getDecl());
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
+ Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, Qualifiers(),
+ ThisContext);
SubstCombiner = SemaRef.SubstExpr(D->getCombiner(), TemplateArgs).get();
SemaRef.ActOnOpenMPDeclareReductionCombinerEnd(NewDRD, SubstCombiner);
// Initializers instantiation sequence.
@@ -2784,19 +2893,12 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
VarDecl *OmpPrivParm =
SemaRef.ActOnOpenMPDeclareReductionInitializerStart(
/*S=*/nullptr, NewDRD);
- const char *Names[] = {"omp_orig", "omp_priv"};
- for (auto &Name : Names) {
- DeclarationName DN(&SemaRef.Context.Idents.get(Name));
- auto OldLookup = D->lookup(DN);
- auto Lookup = NewDRD->lookup(DN);
- if (!OldLookup.empty() && !Lookup.empty()) {
- assert(Lookup.size() == 1 && OldLookup.size() == 1);
- auto *OldVD = cast<VarDecl>(OldLookup.front());
- auto *NewVD = cast<VarDecl>(Lookup.front());
- SemaRef.InstantiateVariableInitializer(NewVD, OldVD, TemplateArgs);
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldVD, NewVD);
- }
- }
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getInitOrig())->getDecl(),
+ cast<DeclRefExpr>(NewDRD->getInitOrig())->getDecl());
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(
+ cast<DeclRefExpr>(D->getInitPriv())->getDecl(),
+ cast<DeclRefExpr>(NewDRD->getInitPriv())->getDecl());
if (D->getInitializerKind() == OMPDeclareReductionDecl::CallInit) {
SubstInitializer =
SemaRef.SubstExpr(D->getInitializer(), TemplateArgs).get();
@@ -2813,8 +2915,9 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
SubstInitializer) ||
(D->getInitializerKind() != OMPDeclareReductionDecl::CallInit &&
!SubstInitializer && !SubstInitializer));
- } else
+ } else {
IsCorrect = false;
+ }
(void)SemaRef.ActOnOpenMPDeclareReductionDirectiveEnd(/*S=*/nullptr, DRD,
IsCorrect);
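Illustrative OpenMP sketch (requires -fopenmp, not part of the patch): a declare reduction inside a function template whose reduction type is not dependent, the case that now reuses the combiner and initializer without re-substitution.

    template <class T> int sum_all(const int *v, int n) {
    #pragma omp declare reduction(merge : int : omp_out += omp_in) \
        initializer(omp_priv = 0)
      int s = 0;
    #pragma omp parallel for reduction(merge : s)
      for (int i = 0; i < n; ++i)
        s += v[i];
      return s;
    }
    template int sum_all<float>(const int *, int);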
@@ -2931,15 +3034,10 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
}
// Create the class template partial specialization declaration.
- ClassTemplateSpecializationDecl *InstD
- = ClassTemplateSpecializationDecl::Create(SemaRef.Context,
- D->getTagKind(),
- Owner,
- D->getLocStart(),
- D->getLocation(),
- InstClassTemplate,
- Converted,
- PrevDecl);
+ ClassTemplateSpecializationDecl *InstD =
+ ClassTemplateSpecializationDecl::Create(
+ SemaRef.Context, D->getTagKind(), Owner, D->getBeginLoc(),
+ D->getLocation(), InstClassTemplate, Converted, PrevDecl);
// Add this partial specialization to the set of class template partial
// specializations.
@@ -3008,7 +3106,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
// Check that the template argument list is well-formed for this template.
SmallVector<TemplateArgument, 4> Converted;
if (SemaRef.CheckTemplateArgumentList(
- VarTemplate, VarTemplate->getLocStart(),
+ VarTemplate, VarTemplate->getBeginLoc(),
const_cast<TemplateArgumentListInfo &>(VarTemplateArgsInfo), false,
Converted))
return nullptr;
@@ -3237,18 +3335,11 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
// Create the class template partial specialization declaration.
- ClassTemplatePartialSpecializationDecl *InstPartialSpec
- = ClassTemplatePartialSpecializationDecl::Create(SemaRef.Context,
- PartialSpec->getTagKind(),
- Owner,
- PartialSpec->getLocStart(),
- PartialSpec->getLocation(),
- InstParams,
- ClassTemplate,
- Converted,
- InstTemplateArgs,
- CanonType,
- nullptr);
+ ClassTemplatePartialSpecializationDecl *InstPartialSpec =
+ ClassTemplatePartialSpecializationDecl::Create(
+ SemaRef.Context, PartialSpec->getTagKind(), Owner,
+ PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
+ ClassTemplate, Converted, InstTemplateArgs, CanonType, nullptr);
// Substitute the nested name specifier, if any.
if (SubstQualifier(PartialSpec, InstPartialSpec))
return nullptr;
@@ -3412,7 +3503,7 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
assert(Params.empty() && "parameter vector is non-empty at start");
CXXRecordDecl *ThisContext = nullptr;
- unsigned ThisTypeQuals = 0;
+ Qualifiers ThisTypeQuals;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
ThisContext = cast<CXXRecordDecl>(Owner);
ThisTypeQuals = Method->getTypeQualifiers();
@@ -3707,6 +3798,9 @@ TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
if (InitFunctionInstantiation(New, Tmpl))
return true;
+ if (isa<CXXDestructorDecl>(New) && SemaRef.getLangOpts().CPlusPlus11)
+ SemaRef.AdjustDestructorExceptionSpec(cast<CXXDestructorDecl>(New));
+
New->setAccess(Tmpl->getAccess());
if (Tmpl->isVirtualAsWritten())
New->setVirtualAsWritten(true);
@@ -3823,7 +3917,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
std::make_pair(Function, PointOfInstantiation));
} else if (TSK == TSK_ImplicitInstantiation) {
if (AtEndOfTU && !getDiagnostics().hasErrorOccurred() &&
- !getSourceManager().isInSystemHeader(PatternDecl->getLocStart())) {
+ !getSourceManager().isInSystemHeader(PatternDecl->getBeginLoc())) {
Diag(PointOfInstantiation, diag::warn_func_template_missing)
<< Function;
Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
@@ -4366,7 +4460,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
} else if (TSK == TSK_ImplicitInstantiation) {
// Warn about missing definition at the end of translation unit.
if (AtEndOfTU && !getDiagnostics().hasErrorOccurred() &&
- !getSourceManager().isInSystemHeader(PatternDecl->getLocStart())) {
+ !getSourceManager().isInSystemHeader(PatternDecl->getBeginLoc())) {
Diag(PointOfInstantiation, diag::warn_var_template_missing)
<< Var;
Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
@@ -4911,7 +5005,9 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
return D;
if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
- (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext()) ||
+ ((ParentDC->isFunctionOrMethod() ||
+ isa<OMPDeclareReductionDecl>(ParentDC)) &&
+ ParentDC->isDependentContext()) ||
(isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
@@ -5192,10 +5288,20 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Inst.first)) {
bool DefinitionRequired = Function->getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition;
- InstantiateFunctionDefinition(/*FIXME:*/Inst.second, Function, true,
- DefinitionRequired, true);
- if (Function->isDefined())
- Function->setInstantiationIsPending(false);
+ if (Function->isMultiVersion()) {
+ getASTContext().forEachMultiversionedFunctionVersion(
+ Function, [this, Inst, DefinitionRequired](FunctionDecl *CurFD) {
+ InstantiateFunctionDefinition(/*FIXME:*/ Inst.second, CurFD, true,
+ DefinitionRequired, true);
+ if (CurFD->isDefined())
+ CurFD->setInstantiationIsPending(false);
+ });
+ } else {
+ InstantiateFunctionDefinition(/*FIXME:*/ Inst.second, Function, true,
+ DefinitionRequired, true);
+ if (Function->isDefined())
+ Function->setInstantiationIsPending(false);
+ }
continue;
}
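For context, a hedged sketch of a target-multiversioned function, the kind of FunctionDecl the new forEachMultiversionedFunctionVersion branch walks so that every version's pending definition gets instantiated (x86 only; illustrative, not taken from the patch).

    __attribute__((target("default"))) int fast_sum(int a, int b) { return a + b; }
    __attribute__((target("avx2"))) int fast_sum(int a, int b) { return a + b; }
    int caller(int x, int y) { return fast_sum(x, y); }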
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 6f9dddf5c05e..0e7fc20d2487 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -164,7 +164,7 @@ namespace {
// A function parameter pack is a pack expansion, so cannot contain
// an unexpanded parameter pack. Likewise for a template parameter
// pack that contains any references to other packs.
- if (D->isParameterPack())
+ if (D && D->isParameterPack())
return true;
return inherited::TraverseDecl(D);
@@ -392,7 +392,7 @@ bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
- return DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
+ return DiagnoseUnexpandedParameterPacks(E->getBeginLoc(), UPPC, Unexpanded);
}
bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
@@ -976,6 +976,7 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
PDiag(diag::note_parameter_pack_here));
ParameterPack = Corrected.getCorrectionDecl();
}
+ break;
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
@@ -1125,8 +1126,8 @@ static void CheckFoldOperand(Sema &S, Expr *E) {
isa<AbstractConditionalOperator>(E)) {
S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand)
<< E->getSourceRange()
- << FixItHint::CreateInsertion(E->getLocStart(), "(")
- << FixItHint::CreateInsertion(E->getLocEnd(), ")");
+ << FixItHint::CreateInsertion(E->getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(E->getEndLoc(), ")");
}
}
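Sketch of the construct the relocated fix-it targets: a conditional (or cast) operand of a fold-expression has to be parenthesized, and the suggested insertions now use getBeginLoc()/getEndLoc().

    template <class... Ts>
    constexpr auto sum_nonneg(Ts... ts) {
      return (0 + ... + (ts < 0 ? 0 : ts));  // parentheses required around ?:
    }
    static_assert(sum_nonneg(3, -2, 4) == 7, "negatives clamped to zero");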
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 284d34b22c04..b4c075e9c46d 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -116,6 +116,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_Pascal: \
case ParsedAttr::AT_SwiftCall: \
case ParsedAttr::AT_VectorCall: \
+ case ParsedAttr::AT_AArch64VectorPcs: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
case ParsedAttr::AT_Pcs: \
@@ -172,11 +173,25 @@ namespace {
/// processing is complete.
SmallVector<ParsedAttr *, 2> ignoredTypeAttrs;
+ /// Attributes corresponding to AttributedTypeLocs that we have not yet
+ /// populated.
+ // FIXME: The two-phase mechanism by which we construct Types and fill
+ // their TypeLocs makes it hard to correctly assign these. We keep the
+ // attributes in creation order as an attempt to make them line up
+ // properly.
+ using TypeAttrPair = std::pair<const AttributedType*, const Attr*>;
+ SmallVector<TypeAttrPair, 8> AttrsForTypes;
+ bool AttrsForTypesSorted = true;
+
+ /// Flag to indicate we parsed a noderef attribute. This is used for
+ /// validating that noderef was used on a pointer or array.
+ bool parsedNoDeref;
+
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
- : sema(sema), declarator(declarator),
- chunkIndex(declarator.getNumTypeObjects()),
- trivial(true), hasSavedAttrs(false) {}
+ : sema(sema), declarator(declarator),
+ chunkIndex(declarator.getNumTypeObjects()), trivial(true),
+ hasSavedAttrs(false), parsedNoDeref(false) {}
Sema &getSema() const {
return sema;
@@ -230,6 +245,47 @@ namespace {
diagnoseBadTypeAttribute(getSema(), *Attr, type);
}
+ /// Get an attributed type for the given attribute, and remember the Attr
+ /// object so that we can attach it to the AttributedTypeLoc.
+ QualType getAttributedType(Attr *A, QualType ModifiedType,
+ QualType EquivType) {
+ QualType T =
+ sema.Context.getAttributedType(A->getKind(), ModifiedType, EquivType);
+ AttrsForTypes.push_back({cast<AttributedType>(T.getTypePtr()), A});
+ AttrsForTypesSorted = false;
+ return T;
+ }
+
+ /// Extract and remove the Attr* for a given attributed type.
+ const Attr *takeAttrForAttributedType(const AttributedType *AT) {
+ if (!AttrsForTypesSorted) {
+ std::stable_sort(AttrsForTypes.begin(), AttrsForTypes.end(),
+ [](const TypeAttrPair &A, const TypeAttrPair &B) {
+ return A.first < B.first;
+ });
+ AttrsForTypesSorted = true;
+ }
+
+ // FIXME: This is quadratic if we have lots of reuses of the same
+ // attributed type.
+ for (auto It = std::partition_point(
+ AttrsForTypes.begin(), AttrsForTypes.end(),
+ [=](const TypeAttrPair &A) { return A.first < AT; });
+ It != AttrsForTypes.end() && It->first == AT; ++It) {
+ if (It->second) {
+ const Attr *Result = It->second;
+ It->second = nullptr;
+ return Result;
+ }
+ }
+
+ llvm_unreachable("no Attr* for AttributedType*");
+ }
+
+ void setParsedNoDeref(bool parsed) { parsedNoDeref = parsed; }
+
+ bool didParseNoDeref() const { return parsedNoDeref; }
+
~TypeProcessingState() {
if (trivial) return;
@@ -246,7 +302,7 @@ namespace {
getMutableDeclSpec().getAttributes().clearListOnly();
for (ParsedAttr *AL : savedAttrs)
- getMutableDeclSpec().getAttributes().addAtStart(AL);
+ getMutableDeclSpec().getAttributes().addAtEnd(AL);
}
};
} // end anonymous namespace
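A self-contained sketch (assumed names, not Sema code) of the lookup pattern behind AttrsForTypes and takeAttrForAttributedType: append pairs unsorted, sort lazily on the first query, locate the key's range with std::partition_point, and consume the first unused entry.

    #include <algorithm>
    #include <utility>
    #include <vector>

    template <class K, class V>
    V *takeFirstFor(std::vector<std::pair<K, V *>> &Pairs, bool &Sorted, K Key) {
      if (!Sorted) {
        std::stable_sort(Pairs.begin(), Pairs.end(),
                         [](const auto &A, const auto &B) { return A.first < B.first; });
        Sorted = true;
      }
      auto It = std::partition_point(Pairs.begin(), Pairs.end(),
                                     [&](const auto &P) { return P.first < Key; });
      for (; It != Pairs.end() && It->first == Key; ++It)
        if (V *Found = It->second) {
          It->second = nullptr;  // consume the entry, as takeAttrForAttributedType does
          return Found;
        }
      return nullptr;
    }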
@@ -255,7 +311,7 @@ static void moveAttrFromListToList(ParsedAttr &attr,
ParsedAttributesView &fromList,
ParsedAttributesView &toList) {
fromList.remove(&attr);
- toList.addAtStart(&attr);
+ toList.addAtEnd(&attr);
}
/// The location of a type attribute.
@@ -656,7 +712,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
// faking up the function chunk is still the right thing to do.
// Otherwise, we need to fake up a function declarator.
- SourceLocation loc = declarator.getLocStart();
+ SourceLocation loc = declarator.getBeginLoc();
// ...and *prepend* it to the declarator.
SourceLocation NoLoc;
@@ -668,12 +724,8 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*NumArgs=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
- /*TypeQuals=*/0,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
- /*ConstQualifierLoc=*/NoLoc,
- /*VolatileQualifierLoc=*/NoLoc,
- /*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
@@ -681,8 +733,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None,
- loc, loc, declarator));
+ /*DeclsInPrototype=*/None, loc, loc, declarator));
// For consistency, make sure the state still has us as processing
// the decl spec.
@@ -808,17 +859,17 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
rangeToRemove = attr.getLocalSourceRange();
if (attr.getTypePtr()->getImmediateNullability()) {
typeArg = attr.getTypePtr()->getModifiedType();
- S.Diag(attr.getLocStart(),
+ S.Diag(attr.getBeginLoc(),
diag::err_objc_type_arg_explicit_nullability)
- << typeArg << FixItHint::CreateRemoval(rangeToRemove);
+ << typeArg << FixItHint::CreateRemoval(rangeToRemove);
diagnosed = true;
}
}
if (!diagnosed) {
- S.Diag(qual.getLocStart(), diag::err_objc_type_arg_qualified)
- << typeArg << typeArg.getQualifiers().getAsString()
- << FixItHint::CreateRemoval(rangeToRemove);
+ S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified)
+ << typeArg << typeArg.getQualifiers().getAsString()
+ << FixItHint::CreateRemoval(rangeToRemove);
}
}
@@ -878,9 +929,9 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
}
// Diagnose the mismatch.
- S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_does_not_match_bound)
- << typeArg << bound << typeParam->getDeclName();
+ << typeArg << bound << typeParam->getDeclName();
S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
<< typeParam->getDeclName();
@@ -906,9 +957,9 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
continue;
// Diagnose the mismatch.
- S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_does_not_match_bound)
- << typeArg << bound << typeParam->getDeclName();
+ << typeArg << bound << typeParam->getDeclName();
S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
<< typeParam->getDeclName();
@@ -924,10 +975,9 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
}
// Diagnose non-id-compatible type arguments.
- S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
+ S.Diag(typeArgInfo->getTypeLoc().getBeginLoc(),
diag::err_objc_type_arg_not_id_compatible)
- << typeArg
- << typeArgInfo->getTypeLoc().getSourceRange();
+ << typeArg << typeArgInfo->getTypeLoc().getSourceRange();
if (failOnError)
return QualType();
@@ -1186,7 +1236,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
DeclSpec &DS = declarator.getMutableDeclSpec();
SourceLocation DeclLoc = declarator.getIdentifierLoc();
if (DeclLoc.isInvalid())
- DeclLoc = DS.getLocStart();
+ DeclLoc = DS.getBeginLoc();
ASTContext &Context = S.Context;
@@ -1268,8 +1318,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// when one is not allowed.
if (DS.isEmpty()) {
S.Diag(DeclLoc, diag::ext_missing_declspec)
- << DS.getSourceRange()
- << FixItHint::CreateInsertion(DS.getLocStart(), "int");
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
}
} else if (!DS.hasTypeSpecifier()) {
// C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
@@ -1818,8 +1868,7 @@ static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
}
static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
- std::string Quals =
- Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+ std::string Quals = FnTy->getTypeQuals().getAsString();
switch (FnTy->getRefQualifier()) {
case RQ_None:
@@ -1861,7 +1910,7 @@ static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
QualifiedFunctionKind QFK) {
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
- if (!FPT || (FPT->getTypeQuals() == 0 && FPT->getRefQualifier() == RQ_None))
+ if (!FPT || (FPT->getTypeQuals().empty() && FPT->getRefQualifier() == RQ_None))
return false;
S.Diag(Loc, diag::err_compound_qualified_function_type)
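User-level sketch of what the getTypeQuals().empty() check now guards: a function type carrying cv- or ref-qualifiers may only be used to declare a non-static member function, so compound types built from it are rejected.

    using ConstRefFn = void() const &;
    struct S {
      ConstRefFn f;    // OK: declares void f() const &;
    };
    // ConstRefFn *p;  // error: err_compound_qualified_function_type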
@@ -2117,8 +2166,8 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (!getLangOpts().CPlusPlus11 &&
ArraySize && !ArraySize->isTypeDependent() &&
!ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
- Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
- << ArraySize->getType() << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
}
@@ -2137,8 +2186,8 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
// of a VLA.
if (getLangOpts().CPlusPlus11 &&
!ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
- Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
- << ArraySize->getType() << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
return QualType();
}
@@ -2151,25 +2200,25 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
// have a value greater than zero.
if (ConstVal.isSigned() && ConstVal.isNegative()) {
if (Entity)
- Diag(ArraySize->getLocStart(), diag::err_decl_negative_array_size)
- << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), diag::err_decl_negative_array_size)
+ << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
else
- Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size)
- << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
return QualType();
}
if (ConstVal == 0) {
// GCC accepts zero sized static arrays. We allow them when
// we're not in a SFINAE context.
- Diag(ArraySize->getLocStart(),
- isSFINAEContext()? diag::err_typecheck_zero_array_size
- : diag::ext_typecheck_zero_array_size)
- << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), isSFINAEContext()
+ ? diag::err_typecheck_zero_array_size
+ : diag::ext_typecheck_zero_array_size)
+ << ArraySize->getSourceRange();
if (ASM == ArrayType::Static) {
- Diag(ArraySize->getLocStart(),
+ Diag(ArraySize->getBeginLoc(),
diag::warn_typecheck_zero_static_array_size)
- << ArraySize->getSourceRange();
+ << ArraySize->getSourceRange();
ASM = ArrayType::Normal;
}
} else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
@@ -2178,9 +2227,8 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
- Diag(ArraySize->getLocStart(), diag::err_array_too_large)
- << ConstVal.toString(10)
- << ArraySize->getSourceRange();
+ Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
+ << ConstVal.toString(10) << ArraySize->getSourceRange();
return QualType();
}
}
@@ -2842,6 +2890,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
// class template argument deduction)?
bool IsCXXAutoType =
(Auto && Auto->getKeyword() != AutoTypeKeyword::GNUAutoType);
+ bool IsDeducedReturnType = false;
switch (D.getContext()) {
case DeclaratorContext::LambdaExprContext:
@@ -2873,9 +2922,9 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
TemplateTypeParmDecl *CorrespondingTemplateParam =
TemplateTypeParmDecl::Create(
SemaRef.Context, SemaRef.Context.getTranslationUnitDecl(),
- /*KeyLoc*/SourceLocation(), /*NameLoc*/D.getLocStart(),
+ /*KeyLoc*/ SourceLocation(), /*NameLoc*/ D.getBeginLoc(),
TemplateParameterDepth, AutoParameterPosition,
- /*Identifier*/nullptr, false, IsParameterPack);
+ /*Identifier*/ nullptr, false, IsParameterPack);
LSI->AutoTemplateParams.push_back(CorrespondingTemplateParam);
// Replace the 'auto' in the function parameter with this invented
// template type parameter.
@@ -2933,10 +2982,12 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TrailingReturnVarContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 13; // Function return type
+ IsDeducedReturnType = true;
break;
case DeclaratorContext::ConversionIdContext:
if (!SemaRef.getLangOpts().CPlusPlus14 || !IsCXXAutoType)
Error = 14; // conversion-type-id
+ IsDeducedReturnType = true;
break;
case DeclaratorContext::FunctionalCastContext:
if (isa<DeducedTemplateSpecializationType>(Deduced))
@@ -3021,10 +3072,14 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
D.getContext() != DeclaratorContext::LambdaExprContext) {
// If there was a trailing return type, we already got
// warn_cxx98_compat_trailing_return_type in the parser.
- // If this was a lambda, we already warned on that too.
SemaRef.Diag(AutoRange.getBegin(),
- diag::warn_cxx98_compat_auto_type_specifier)
- << AutoRange;
+ D.getContext() ==
+ DeclaratorContext::LambdaExprParameterContext
+ ? diag::warn_cxx11_compat_generic_lambda
+ : IsDeducedReturnType
+ ? diag::warn_cxx11_compat_deduced_return_type
+ : diag::warn_cxx98_compat_auto_type_specifier)
+ << AutoRange;
}
}
@@ -3302,9 +3357,9 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
<< D.getIdentifier();
// FIXME: A cast to void is probably a better suggestion in cases where it's
// valid (when there is no initializer and we're not in a condition).
- S.Diag(D.getLocStart(), diag::note_function_style_cast_add_parentheses)
- << FixItHint::CreateInsertion(D.getLocStart(), "(")
- << FixItHint::CreateInsertion(S.getLocForEndOfToken(D.getLocEnd()), ")");
+ S.Diag(D.getBeginLoc(), diag::note_function_style_cast_add_parentheses)
+ << FixItHint::CreateInsertion(D.getBeginLoc(), "(")
+ << FixItHint::CreateInsertion(S.getLocForEndOfToken(D.getEndLoc()), ")");
S.Diag(Paren.Loc, diag::note_remove_parens_for_variable_declaration)
<< FixItHint::CreateRemoval(Paren.Loc)
<< FixItHint::CreateRemoval(Paren.EndLoc);
@@ -3834,6 +3889,37 @@ static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
return false;
}
+static bool IsNoDerefableChunk(DeclaratorChunk Chunk) {
+ return (Chunk.Kind == DeclaratorChunk::Pointer ||
+ Chunk.Kind == DeclaratorChunk::Array);
+}
+
+template<typename AttrT>
+static AttrT *createSimpleAttr(ASTContext &Ctx, ParsedAttr &Attr) {
+ Attr.setUsedAsTypeAttr();
+ return ::new (Ctx)
+ AttrT(Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex());
+}
+
+static Attr *createNullabilityAttr(ASTContext &Ctx, ParsedAttr &Attr,
+ NullabilityKind NK) {
+ switch (NK) {
+ case NullabilityKind::NonNull:
+ return createSimpleAttr<TypeNonNullAttr>(Ctx, Attr);
+
+ case NullabilityKind::Nullable:
+ return createSimpleAttr<TypeNullableAttr>(Ctx, Attr);
+
+ case NullabilityKind::Unspecified:
+ return createSimpleAttr<TypeNullUnspecifiedAttr>(Ctx, Attr);
+ }
+ llvm_unreachable("unknown NullabilityKind");
+}
+
+static TypeSourceInfo *
+GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
+ QualType T, TypeSourceInfo *ReturnTypeInfo);
+
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
QualType declSpecType,
TypeSourceInfo *TInfo) {
@@ -3858,7 +3944,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Does T refer to a function type with a cv-qualifier or a ref-qualifier?
bool IsQualifiedFunction = T->isFunctionProtoType() &&
- (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 ||
+ (!T->castAs<FunctionProtoType>()->getTypeQuals().empty() ||
T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
// If T is 'decltype(auto)', the only declarators we can have are parens
@@ -4128,7 +4214,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
SourceRange(pointerLoc), nullptr, SourceLocation(), nullptr, 0,
syntax);
- attrs.addAtStart(nullabilityAttr);
+ attrs.addAtEnd(nullabilityAttr);
if (inferNullabilityCS) {
state.getDeclarator().getMutableDeclSpec().getObjCQualifiers()
@@ -4184,11 +4270,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (auto *attr = inferPointerNullability(
pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
- D.getDeclSpec().getLocEnd(),
+ D.getDeclSpec().getEndLoc(),
D.getMutableDeclSpec().getAttributes())) {
- T = Context.getAttributedType(
- AttributedType::getNullabilityAttrKind(*inferNullability),T,T);
- attr->setUsedAsTypeAttr();
+ T = state.getAttributedType(
+ createNullabilityAttr(Context, *attr, *inferNullability), T, T);
}
}
}
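Sketch of the source-level attributes that now flow through state.getAttributedType() and createNullabilityAttr, so the Attr node can later be attached to the AttributedTypeLoc:

    void copy_into(int *_Nonnull dst, const int *_Nullable src);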
@@ -4202,6 +4287,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
+ bool ExpectNoDerefChunk =
+ state.getCurrentAttributes().hasAttribute(ParsedAttr::AT_NoDeref);
+
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
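Hedged sketch of what the ExpectNoDerefChunk flag seeded here checks while walking the declarator chunks: Clang's noderef type attribute is only meaningful on pointer or array types, otherwise warn_noderef_on_non_pointer_or_array fires.

    int __attribute__((noderef)) *device_ptr;    // OK: a pointer chunk follows
    // int __attribute__((noderef)) plain_value; // warns: no pointer or array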
@@ -4247,7 +4335,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc,
DeclType.EndLoc, DeclType.getAttrs());
- if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
+ if (LangOpts.ObjC && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
if (DeclType.Ptr.TypeQuals)
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
@@ -4367,7 +4455,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// does not have a K&R-style identifier list), then the arguments are part
// of the type, otherwise the argument list is ().
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
- IsQualifiedFunction = FTI.TypeQuals || FTI.hasRefQualifier();
+ IsQualifiedFunction =
+ FTI.hasMethodTypeQualifiers() || FTI.hasRefQualifier();
// Check for auto functions and trailing return type and adjust the
// return type accordingly.
@@ -4375,25 +4464,28 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// trailing-return-type is only required if we're declaring a function,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().hasAutoTypeSpec() &&
- !FTI.hasTrailingReturnType() && chunkIndex == 0 &&
- !S.getLangOpts().CPlusPlus14) {
- S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
- D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
- ? diag::err_auto_missing_trailing_return
- : diag::err_deduced_return_type);
- T = Context.IntTy;
- D.setInvalidType(true);
+ !FTI.hasTrailingReturnType() && chunkIndex == 0) {
+ if (!S.getLangOpts().CPlusPlus14) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto
+ ? diag::err_auto_missing_trailing_return
+ : diag::err_deduced_return_type);
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::warn_cxx11_compat_deduced_return_type);
+ }
} else if (FTI.hasTrailingReturnType()) {
// T must be exactly 'auto' at this point. See CWG issue 681.
if (isa<ParenType>(T)) {
- S.Diag(D.getLocStart(),
- diag::err_trailing_return_in_parens)
- << T << D.getSourceRange();
+ S.Diag(D.getBeginLoc(), diag::err_trailing_return_in_parens)
+ << T << D.getSourceRange();
D.setInvalidType(true);
} else if (D.getName().getKind() ==
UnqualifiedIdKind::IK_DeductionGuideName) {
if (T != Context.DependentTy) {
- S.Diag(D.getDeclSpec().getLocStart(),
+ S.Diag(D.getDeclSpec().getBeginLoc(),
diag::err_deduction_guide_with_complex_decl)
<< D.getSourceRange();
D.setInvalidType(true);
@@ -4413,6 +4505,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
T = Context.IntTy;
D.setInvalidType(true);
}
+ } else {
+ // This function type is not the type of the entity being declared,
+ // so checking the 'auto' is not the responsibility of this chunk.
}
}
@@ -4475,11 +4570,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (T->isObjCObjectType()) {
SourceLocation DiagLoc, FixitLoc;
if (TInfo) {
- DiagLoc = TInfo->getTypeLoc().getLocStart();
- FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getLocEnd());
+ DiagLoc = TInfo->getTypeLoc().getBeginLoc();
+ FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getEndLoc());
} else {
DiagLoc = D.getDeclSpec().getTypeSpecTypeLoc();
- FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getLocEnd());
+ FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getEndLoc());
}
S.Diag(DiagLoc, diag::err_object_cannot_be_passed_returned_by_value)
<< 0 << T
@@ -4599,7 +4694,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
EPI.ExtInfo = EI;
EPI.Variadic = FTI.isVariadic;
EPI.HasTrailingReturn = FTI.hasTrailingReturnType();
- EPI.TypeQuals = FTI.TypeQuals;
+ EPI.TypeQuals.addCVRUQualifiers(
+ FTI.MethodQualifiers ? FTI.MethodQualifiers->getTypeQualifiers()
+ : 0);
EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
: FTI.RefQualifierIsLValueRef? RQ_LValue
: RQ_RValue;
@@ -4726,6 +4823,20 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Exceptions,
EPI.ExceptionSpec);
+ const auto &Spec = D.getCXXScopeSpec();
+ // OpenCLCPlusPlus: A class member function has an address space.
+ if (state.getSema().getLangOpts().OpenCLCPlusPlus &&
+ ((!Spec.isEmpty() &&
+ Spec.getScopeRep()->getKind() == NestedNameSpecifier::TypeSpec) ||
+ state.getDeclarator().getContext() ==
+ DeclaratorContext::MemberContext)) {
+ LangAS CurAS = EPI.TypeQuals.getAddressSpace();
+ // If a class member function's address space is not set, set it to
+ // __generic.
+ LangAS AS =
+ (CurAS == LangAS::Default ? LangAS::opencl_generic : CurAS);
+ EPI.TypeQuals.addAddressSpace(AS);
+ }
T = Context.getFunctionType(T, ParamTys, EPI);
}
break;
@@ -4805,8 +4916,22 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// See if there are any attributes on this declarator chunk.
processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
+
+ if (DeclType.Kind != DeclaratorChunk::Paren) {
+ if (ExpectNoDerefChunk) {
+ if (!IsNoDerefableChunk(DeclType))
+ S.Diag(DeclType.Loc, diag::warn_noderef_on_non_pointer_or_array);
+ ExpectNoDerefChunk = false;
+ }
+
+ ExpectNoDerefChunk = state.didParseNoDeref();
+ }
}
+ if (ExpectNoDerefChunk)
+ S.Diag(state.getDeclarator().getBeginLoc(),
+ diag::warn_noderef_on_non_pointer_or_array);
+
// GNU warning -Wstrict-prototypes
// Warn if a function declaration is without a prototype.
// This warning is issued for all kinds of unprototyped function
@@ -4887,23 +5012,24 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
!IsTypedefName &&
D.getContext() != DeclaratorContext::TemplateArgContext &&
D.getContext() != DeclaratorContext::TemplateTypeArgContext) {
- SourceLocation Loc = D.getLocStart();
+ SourceLocation Loc = D.getBeginLoc();
SourceRange RemovalRange;
unsigned I;
if (D.isFunctionDeclarator(I)) {
SmallVector<SourceLocation, 4> RemovalLocs;
const DeclaratorChunk &Chunk = D.getTypeObject(I);
assert(Chunk.Kind == DeclaratorChunk::Function);
+
if (Chunk.Fun.hasRefQualifier())
RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc());
- if (Chunk.Fun.TypeQuals & Qualifiers::Const)
- RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc());
- if (Chunk.Fun.TypeQuals & Qualifiers::Volatile)
- RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc());
- if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
- RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
+
+ if (Chunk.Fun.hasMethodTypeQualifiers())
+ Chunk.Fun.MethodQualifiers->forEachQualifier(
+ [&](DeclSpec::TQ TypeQual, StringRef QualName,
+ SourceLocation SL) { RemovalLocs.push_back(SL); });
+
if (!RemovalLocs.empty()) {
- llvm::sort(RemovalLocs.begin(), RemovalLocs.end(),
+ llvm::sort(RemovalLocs,
BeforeThanCompare<SourceLocation>(S.getSourceManager()));
RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
Loc = RemovalLocs.front();
@@ -4917,7 +5043,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Strip the cv-qualifiers and ref-qualifiers from the type.
FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
- EPI.TypeQuals = 0;
+ EPI.TypeQuals.removeCVRQualifiers();
EPI.RefQualifier = RQ_None;
T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(),
@@ -5028,7 +5154,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (D.isInvalidType())
return Context.getTrivialTypeSourceInfo(T);
- return S.GetTypeSourceInfoForDeclarator(D, T, TInfo);
+ return GetTypeSourceInfoForDeclarator(state, T, TInfo);
}
/// GetTypeForDeclarator - Convert the type for the specified
@@ -5093,7 +5219,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
&S.Context.Idents.get("objc_ownership"), SourceLocation(),
/*scope*/ nullptr, SourceLocation(),
/*args*/ &Args, 1, ParsedAttr::AS_GNU);
- chunk.getAttrs().addAtStart(attr);
+ chunk.getAttrs().addAtEnd(attr);
// TODO: mark whether we did this inference?
}
@@ -5155,7 +5281,7 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
TypeSourceInfo *ReturnTypeInfo = nullptr;
QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
- if (getLangOpts().ObjC1) {
+ if (getLangOpts().ObjC) {
Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
if (ownership != Qualifiers::OCL_None)
transferARCOwnership(state, declSpecTy, ownership);
@@ -5164,131 +5290,25 @@ TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
}
-/// Map an AttributedType::Kind to an ParsedAttr::Kind.
-static ParsedAttr::Kind getAttrListKind(AttributedType::Kind kind) {
- switch (kind) {
- case AttributedType::attr_address_space:
- return ParsedAttr::AT_AddressSpace;
- case AttributedType::attr_regparm:
- return ParsedAttr::AT_Regparm;
- case AttributedType::attr_vector_size:
- return ParsedAttr::AT_VectorSize;
- case AttributedType::attr_neon_vector_type:
- return ParsedAttr::AT_NeonVectorType;
- case AttributedType::attr_neon_polyvector_type:
- return ParsedAttr::AT_NeonPolyVectorType;
- case AttributedType::attr_objc_gc:
- return ParsedAttr::AT_ObjCGC;
- case AttributedType::attr_objc_ownership:
- case AttributedType::attr_objc_inert_unsafe_unretained:
- return ParsedAttr::AT_ObjCOwnership;
- case AttributedType::attr_noreturn:
- return ParsedAttr::AT_NoReturn;
- case AttributedType::attr_nocf_check:
- return ParsedAttr::AT_AnyX86NoCfCheck;
- case AttributedType::attr_cdecl:
- return ParsedAttr::AT_CDecl;
- case AttributedType::attr_fastcall:
- return ParsedAttr::AT_FastCall;
- case AttributedType::attr_stdcall:
- return ParsedAttr::AT_StdCall;
- case AttributedType::attr_thiscall:
- return ParsedAttr::AT_ThisCall;
- case AttributedType::attr_regcall:
- return ParsedAttr::AT_RegCall;
- case AttributedType::attr_pascal:
- return ParsedAttr::AT_Pascal;
- case AttributedType::attr_swiftcall:
- return ParsedAttr::AT_SwiftCall;
- case AttributedType::attr_vectorcall:
- return ParsedAttr::AT_VectorCall;
- case AttributedType::attr_pcs:
- case AttributedType::attr_pcs_vfp:
- return ParsedAttr::AT_Pcs;
- case AttributedType::attr_inteloclbicc:
- return ParsedAttr::AT_IntelOclBicc;
- case AttributedType::attr_ms_abi:
- return ParsedAttr::AT_MSABI;
- case AttributedType::attr_sysv_abi:
- return ParsedAttr::AT_SysVABI;
- case AttributedType::attr_preserve_most:
- return ParsedAttr::AT_PreserveMost;
- case AttributedType::attr_preserve_all:
- return ParsedAttr::AT_PreserveAll;
- case AttributedType::attr_ptr32:
- return ParsedAttr::AT_Ptr32;
- case AttributedType::attr_ptr64:
- return ParsedAttr::AT_Ptr64;
- case AttributedType::attr_sptr:
- return ParsedAttr::AT_SPtr;
- case AttributedType::attr_uptr:
- return ParsedAttr::AT_UPtr;
- case AttributedType::attr_nonnull:
- return ParsedAttr::AT_TypeNonNull;
- case AttributedType::attr_nullable:
- return ParsedAttr::AT_TypeNullable;
- case AttributedType::attr_null_unspecified:
- return ParsedAttr::AT_TypeNullUnspecified;
- case AttributedType::attr_objc_kindof:
- return ParsedAttr::AT_ObjCKindOf;
- case AttributedType::attr_ns_returns_retained:
- return ParsedAttr::AT_NSReturnsRetained;
- case AttributedType::attr_lifetimebound:
- return ParsedAttr::AT_LifetimeBound;
- }
- llvm_unreachable("unexpected attribute kind!");
-}
-
-static void setAttributedTypeLoc(AttributedTypeLoc TL, const ParsedAttr &attr) {
- TL.setAttrNameLoc(attr.getLoc());
- if (TL.hasAttrExprOperand()) {
- assert(attr.isArgExpr(0) && "mismatched attribute operand kind");
- TL.setAttrExprOperand(attr.getArgAsExpr(0));
- } else if (TL.hasAttrEnumOperand()) {
- assert((attr.isArgIdent(0) || attr.isArgExpr(0)) &&
- "unexpected attribute operand kind");
- if (attr.isArgIdent(0))
- TL.setAttrEnumOperandLoc(attr.getArgAsIdent(0)->Loc);
- else
- TL.setAttrEnumOperandLoc(attr.getArgAsExpr(0)->getExprLoc());
- }
-
- // FIXME: preserve this information to here.
- if (TL.hasAttrOperand())
- TL.setAttrOperandParensRange(SourceRange());
-}
-
static void fillAttributedTypeLoc(AttributedTypeLoc TL,
- const ParsedAttributesView &Attrs,
- const ParsedAttributesView &DeclAttrs) {
- // DeclAttrs and Attrs cannot be both empty.
- assert((!Attrs.empty() || !DeclAttrs.empty()) &&
- "no type attributes in the expected location!");
-
- ParsedAttr::Kind parsedKind = getAttrListKind(TL.getAttrKind());
- // Try to search for an attribute of matching kind in Attrs list.
- for (const ParsedAttr &AL : Attrs)
- if (AL.getKind() == parsedKind)
- return setAttributedTypeLoc(TL, AL);
-
- for (const ParsedAttr &AL : DeclAttrs)
- if (AL.isCXX11Attribute() || AL.getKind() == parsedKind)
- return setAttributedTypeLoc(TL, AL);
- llvm_unreachable("no matching type attribute in expected location!");
+ TypeProcessingState &State) {
+ TL.setAttr(State.takeAttrForAttributedType(TL.getTypePtr()));
}
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
ASTContext &Context;
+ TypeProcessingState &State;
const DeclSpec &DS;
public:
- TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
- : Context(Context), DS(DS) {}
+ TypeSpecLocFiller(ASTContext &Context, TypeProcessingState &State,
+ const DeclSpec &DS)
+ : Context(Context), State(State), DS(DS) {}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- fillAttributedTypeLoc(TL, DS.getAttributes(), ParsedAttributesView{});
Visit(TL.getModifiedLoc());
+ fillAttributedTypeLoc(TL, State);
}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
Visit(TL.getUnqualifiedLoc());
@@ -5301,7 +5321,7 @@ namespace {
// FIXME. We should have DS.getTypeSpecTypeEndLoc(). But, it requires an
// additional field. What we have is good enough for display of the location
// of the 'fixit' on the interface name.
- TL.setNameEndLoc(DS.getLocEnd());
+ TL.setNameEndLoc(DS.getEndLoc());
}
void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
TypeSourceInfo *RepTInfo = nullptr;
@@ -5445,11 +5465,13 @@ namespace {
class DeclaratorLocFiller : public TypeLocVisitor<DeclaratorLocFiller> {
ASTContext &Context;
+ TypeProcessingState &State;
const DeclaratorChunk &Chunk;
public:
- DeclaratorLocFiller(ASTContext &Context, const DeclaratorChunk &Chunk)
- : Context(Context), Chunk(Chunk) {}
+ DeclaratorLocFiller(ASTContext &Context, TypeProcessingState &State,
+ const DeclaratorChunk &Chunk)
+ : Context(Context), State(State), Chunk(Chunk) {}
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
llvm_unreachable("qualified type locs not expected here!");
@@ -5459,7 +5481,7 @@ namespace {
}
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- fillAttributedTypeLoc(TL, Chunk.getAttrs(), ParsedAttributesView{});
+ fillAttributedTypeLoc(TL, State);
}
void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing
@@ -5616,10 +5638,13 @@ fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
/// up in the normal place in the declaration specifiers (such as a C++
/// conversion function), this pointer will refer to a type source information
/// for that return type.
-TypeSourceInfo *
-Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
- TypeSourceInfo *ReturnTypeInfo) {
- TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
+static TypeSourceInfo *
+GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
+ QualType T, TypeSourceInfo *ReturnTypeInfo) {
+ Sema &S = State.getSema();
+ Declarator &D = State.getDeclarator();
+
+ TypeSourceInfo *TInfo = S.Context.CreateTypeSourceInfo(T);
UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
// Handle parameter packs whose type is a pack expansion.
@@ -5629,13 +5654,6 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
}
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
-
- if (DependentAddressSpaceTypeLoc DASTL =
- CurrTL.getAs<DependentAddressSpaceTypeLoc>()) {
- fillDependentAddressSpaceTypeLoc(DASTL, D.getTypeObject(i).getAttrs());
- CurrTL = DASTL.getPointeeTypeLoc().getUnqualifiedLoc();
- }
-
// An AtomicTypeLoc might be produced by an atomic qualifier in this
// declarator chunk.
if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) {
@@ -5644,16 +5662,21 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
}
while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
- fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs(),
- D.getAttributes());
+ fillAttributedTypeLoc(TL, State);
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
}
+ while (DependentAddressSpaceTypeLoc TL =
+ CurrTL.getAs<DependentAddressSpaceTypeLoc>()) {
+ fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs());
+ CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
+ }
+
// FIXME: Ordering here?
while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
- DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
+ DeclaratorLocFiller(S.Context, State, D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
}
@@ -5664,7 +5687,7 @@ Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
} else {
- TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
+ TypeSpecLocFiller(S.Context, State, D.getDeclSpec()).Visit(CurrTL);
}
return TInfo;
@@ -5801,7 +5824,10 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
/// specified type. The attribute contains 1 argument, the id of the address
/// space for the type.
static void HandleAddressSpaceTypeAttribute(QualType &Type,
- const ParsedAttr &Attr, Sema &S) {
+ const ParsedAttr &Attr,
+ TypeProcessingState &State) {
+ Sema &S = State.getSema();
+
// ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be
// qualified by an address-space qualifier."
if (Type->isFunctionType()) {
@@ -5815,8 +5841,8 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
+ << 1;
Attr.setInvalid();
return;
}
@@ -5843,10 +5869,15 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
// the type.
QualType T = S.BuildAddressSpaceAttr(Type, ASArgExpr, Attr.getLoc());
- if (!T.isNull())
- Type = T;
- else
+ if (!T.isNull()) {
+ ASTContext &Ctx = S.Context;
+ auto *ASAttr = ::new (Ctx) AddressSpaceAttr(
+ Attr.getRange(), Ctx, Attr.getAttributeSpellingListIndex(),
+ static_cast<unsigned>(T.getQualifiers().getAddressSpace()));
+ Type = State.getAttributedType(ASAttr, T, T);
+ } else {
Attr.setInvalid();
+ }
} else {
// The keyword-based type attributes imply which address space to use.
switch (Attr.getKind()) {
@@ -5893,7 +5924,7 @@ static bool hasDirectOwnershipQualifier(QualType type) {
while (true) {
// __strong id
if (const AttributedType *attr = dyn_cast<AttributedType>(type)) {
- if (attr->getAttrKind() == AttributedType::attr_objc_ownership)
+ if (attr->getAttrKind() == attr::ObjCOwnership)
return true;
type = attr->getModifiedType();
@@ -5951,8 +5982,8 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
S.getSourceManager().getImmediateExpansionRange(AttrLoc).getBegin();
if (!attr.isArgIdent(0)) {
- S.Diag(AttrLoc, diag::err_attribute_argument_type)
- << attr.getName() << AANT_ArgumentString;
+ S.Diag(AttrLoc, diag::err_attribute_argument_type) << attr
+ << AANT_ArgumentString;
attr.setInvalid();
return true;
}
@@ -6037,9 +6068,9 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
// the coexistence problems with __unsafe_unretained.
if (!S.getLangOpts().ObjCAutoRefCount &&
lifetime == Qualifiers::OCL_ExplicitNone) {
- type = S.Context.getAttributedType(
- AttributedType::attr_objc_inert_unsafe_unretained,
- type, type);
+ type = state.getAttributedType(
+ createSimpleAttr<ObjCInertUnsafeUnretainedAttr>(S.Context, attr),
+ type, type);
return true;
}
@@ -6049,9 +6080,12 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
// If we have a valid source location for the attribute, use an
// AttributedType instead.
- if (AttrLoc.isValid())
- type = S.Context.getAttributedType(AttributedType::attr_objc_ownership,
- origType, type);
+ if (AttrLoc.isValid()) {
+ type = state.getAttributedType(::new (S.Context) ObjCOwnershipAttr(
+ attr.getRange(), S.Context, II,
+ attr.getAttributeSpellingListIndex()),
+ origType, type);
+ }
auto diagnoseOrDelay = [](Sema &S, SourceLocation loc,
unsigned diagnostic, QualType type) {
@@ -6122,14 +6156,14 @@ static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// Check the attribute arguments.
if (!attr.isArgIdent(0)) {
S.Diag(attr.getLoc(), diag::err_attribute_argument_type)
- << attr.getName() << AANT_ArgumentString;
+ << attr << AANT_ArgumentString;
attr.setInvalid();
return true;
}
Qualifiers::GC GCAttr;
if (attr.getNumArgs() > 1) {
- S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << attr.getName() << 1;
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << attr
+ << 1;
attr.setInvalid();
return true;
}
@@ -6151,8 +6185,10 @@ static bool handleObjCGCTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// Make an attributed type to preserve the source information.
if (attr.getLoc().isValid())
- type = S.Context.getAttributedType(AttributedType::attr_objc_gc,
- origType, type);
+ type = state.getAttributedType(
+ ::new (S.Context) ObjCGCAttr(attr.getRange(), S.Context, II,
+ attr.getAttributeSpellingListIndex()),
+ origType, type);
return true;
}
@@ -6295,37 +6331,50 @@ namespace {
} // end anonymous namespace
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
- ParsedAttr &Attr, QualType &Type) {
+ ParsedAttr &PAttr, QualType &Type) {
Sema &S = State.getSema();
- ParsedAttr::Kind Kind = Attr.getKind();
+ Attr *A;
+ switch (PAttr.getKind()) {
+ default: llvm_unreachable("Unknown attribute kind");
+ case ParsedAttr::AT_Ptr32:
+ A = createSimpleAttr<Ptr32Attr>(S.Context, PAttr);
+ break;
+ case ParsedAttr::AT_Ptr64:
+ A = createSimpleAttr<Ptr64Attr>(S.Context, PAttr);
+ break;
+ case ParsedAttr::AT_SPtr:
+ A = createSimpleAttr<SPtrAttr>(S.Context, PAttr);
+ break;
+ case ParsedAttr::AT_UPtr:
+ A = createSimpleAttr<UPtrAttr>(S.Context, PAttr);
+ break;
+ }
+
+ attr::Kind NewAttrKind = A->getKind();
QualType Desugared = Type;
const AttributedType *AT = dyn_cast<AttributedType>(Type);
while (AT) {
- AttributedType::Kind CurAttrKind = AT->getAttrKind();
+ attr::Kind CurAttrKind = AT->getAttrKind();
// You cannot specify duplicate type attributes, so if the attribute has
// already been applied, flag it.
- if (getAttrListKind(CurAttrKind) == Kind) {
- S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact)
- << Attr.getName();
+ if (NewAttrKind == CurAttrKind) {
+ S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact)
+ << PAttr.getName();
return true;
}
// You cannot have both __sptr and __uptr on the same type, nor can you
// have __ptr32 and __ptr64.
- if ((CurAttrKind == AttributedType::attr_ptr32 &&
- Kind == ParsedAttr::AT_Ptr64) ||
- (CurAttrKind == AttributedType::attr_ptr64 &&
- Kind == ParsedAttr::AT_Ptr32)) {
- S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ if ((CurAttrKind == attr::Ptr32 && NewAttrKind == attr::Ptr64) ||
+ (CurAttrKind == attr::Ptr64 && NewAttrKind == attr::Ptr32)) {
+ S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'" << "'__ptr64'";
return true;
- } else if ((CurAttrKind == AttributedType::attr_sptr &&
- Kind == ParsedAttr::AT_UPtr) ||
- (CurAttrKind == AttributedType::attr_uptr &&
- Kind == ParsedAttr::AT_SPtr)) {
- S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ } else if ((CurAttrKind == attr::SPtr && NewAttrKind == attr::UPtr) ||
+ (CurAttrKind == attr::UPtr && NewAttrKind == attr::SPtr)) {
+ S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'" << "'__uptr'";
return true;
}
@@ -6336,43 +6385,64 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
// Pointer type qualifiers can only operate on pointer types, but not
// pointer-to-member types.
+ //
+ // FIXME: Should we really be disallowing this attribute if there is any
+ // type sugar between it and the pointer (other than attributes)? Eg, this
+ // disallows the attribute on a parenthesized pointer.
+ // And if so, should we really allow *any* type attribute?
if (!isa<PointerType>(Desugared)) {
if (Type->isMemberPointerType())
- S.Diag(Attr.getLoc(), diag::err_attribute_no_member_pointers)
- << Attr.getName();
+ S.Diag(PAttr.getLoc(), diag::err_attribute_no_member_pointers) << PAttr;
else
- S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only)
- << Attr.getName() << 0;
+ S.Diag(PAttr.getLoc(), diag::err_attribute_pointers_only) << PAttr << 0;
return true;
}
- AttributedType::Kind TAK;
- switch (Kind) {
- default: llvm_unreachable("Unknown attribute kind");
- case ParsedAttr::AT_Ptr32:
- TAK = AttributedType::attr_ptr32;
- break;
- case ParsedAttr::AT_Ptr64:
- TAK = AttributedType::attr_ptr64;
- break;
- case ParsedAttr::AT_SPtr:
- TAK = AttributedType::attr_sptr;
- break;
- case ParsedAttr::AT_UPtr:
- TAK = AttributedType::attr_uptr;
- break;
- }
-
- Type = S.Context.getAttributedType(TAK, Type, Type);
+ Type = State.getAttributedType(A, Type, Type);
return false;
}
-bool Sema::checkNullabilityTypeSpecifier(QualType &type,
- NullabilityKind nullability,
- SourceLocation nullabilityLoc,
- bool isContextSensitive,
- bool allowOnArrayType) {
- recordNullabilitySeen(*this, nullabilityLoc);
+/// Map a nullability attribute kind to a nullability kind.
+static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
+ switch (kind) {
+ case ParsedAttr::AT_TypeNonNull:
+ return NullabilityKind::NonNull;
+
+ case ParsedAttr::AT_TypeNullable:
+ return NullabilityKind::Nullable;
+
+ case ParsedAttr::AT_TypeNullUnspecified:
+ return NullabilityKind::Unspecified;
+
+ default:
+ llvm_unreachable("not a nullability attribute kind");
+ }
+}
+
+/// Applies a nullability type specifier to the given type, if possible.
+///
+/// \param state The type processing state.
+///
+/// \param type The type to which the nullability specifier will be
+/// added. On success, this type will be updated appropriately.
+///
+/// \param attr The attribute as written on the type.
+///
+/// \param allowOnArrayType Whether to accept nullability specifiers on an
+/// array type (e.g., because it will decay to a pointer).
+///
+/// \returns true if a problem has been diagnosed, false on success.
+static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
+ QualType &type,
+ ParsedAttr &attr,
+ bool allowOnArrayType) {
+ Sema &S = state.getSema();
+
+ NullabilityKind nullability = mapNullabilityAttrKind(attr.getKind());
+ SourceLocation nullabilityLoc = attr.getLoc();
+ bool isContextSensitive = attr.isContextSensitiveKeywordAttribute();
+
+ recordNullabilitySeen(S, nullabilityLoc);
// Check for existing nullability attributes on the type.
QualType desugared = type;
@@ -6381,7 +6451,7 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
if (auto existingNullability = attributed->getImmediateNullability()) {
// Duplicated nullability.
if (nullability == *existingNullability) {
- Diag(nullabilityLoc, diag::warn_nullability_duplicate)
+ S.Diag(nullabilityLoc, diag::warn_nullability_duplicate)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< FixItHint::CreateRemoval(nullabilityLoc);
@@ -6389,7 +6459,7 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
}
// Conflicting nullability.
- Diag(nullabilityLoc, diag::err_nullability_conflicting)
+ S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< DiagNullabilityKind(*existingNullability, false);
return true;
@@ -6402,9 +6472,9 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
// This (unlike the code above) looks through typedefs that might
// have nullability specifiers on them, which means we cannot
// provide a useful Fix-It.
- if (auto existingNullability = desugared->getNullability(Context)) {
+ if (auto existingNullability = desugared->getNullability(S.Context)) {
if (nullability != *existingNullability) {
- Diag(nullabilityLoc, diag::err_nullability_conflicting)
+ S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
<< DiagNullabilityKind(nullability, isContextSensitive)
<< DiagNullabilityKind(*existingNullability, false);
@@ -6415,7 +6485,7 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
if (auto typedefNullability
= AttributedType::stripOuterNullability(underlyingType)) {
if (*typedefNullability == *existingNullability) {
- Diag(typedefDecl->getLocation(), diag::note_nullability_here)
+ S.Diag(typedefDecl->getLocation(), diag::note_nullability_here)
<< DiagNullabilityKind(*existingNullability, false);
}
}
@@ -6428,7 +6498,7 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
// If this definitely isn't a pointer type, reject the specifier.
if (!desugared->canHaveNullability() &&
!(allowOnArrayType && desugared->isArrayType())) {
- Diag(nullabilityLoc, diag::err_nullability_nonpointer)
+ S.Diag(nullabilityLoc, diag::err_nullability_nonpointer)
<< DiagNullabilityKind(nullability, isContextSensitive) << type;
return true;
}
@@ -6446,10 +6516,10 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
if (pointeeType->isAnyPointerType() ||
pointeeType->isObjCObjectPointerType() ||
pointeeType->isMemberPointerType()) {
- Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
+ S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
<< DiagNullabilityKind(nullability, true)
<< type;
- Diag(nullabilityLoc, diag::note_nullability_type_specifier)
+ S.Diag(nullabilityLoc, diag::note_nullability_type_specifier)
<< DiagNullabilityKind(nullability, false)
<< type
<< FixItHint::CreateReplacement(nullabilityLoc,
@@ -6459,16 +6529,21 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
}
// Form the attributed type.
- type = Context.getAttributedType(
- AttributedType::getNullabilityAttrKind(nullability), type, type);
+ type = state.getAttributedType(
+ createNullabilityAttr(S.Context, attr, nullability), type, type);
return false;
}
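For reference, the user-facing forms this check accepts and rejects (a hand-written illustration, not part of the patch; the identifiers are invented, and the snippet relies on Clang's nullability extension, valid in both C and C++ modes):

  int *_Nonnull p1;               // OK: the specifier applies to the pointer type
  int *_Nonnull _Nonnull p2;      // warning: duplicate nullability specifier
  int *_Nonnull _Nullable p3;     // error: conflicting nullability specifiers
  void f(int ints[_Nonnull 10]);  // OK: the array parameter decays to a pointer
  // int _Nonnull i;              // error: nullability on a non-pointer type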
-bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
+/// Check the application of the Objective-C '__kindof' qualifier to
+/// the given type.
+static bool checkObjCKindOfType(TypeProcessingState &state, QualType &type,
+ ParsedAttr &attr) {
+ Sema &S = state.getSema();
+
if (isa<ObjCTypeParamType>(type)) {
// Build the attributed type to record where __kindof occurred.
- type = Context.getAttributedType(AttributedType::attr_objc_kindof,
- type, type);
+ type = state.getAttributedType(
+ createSimpleAttr<ObjCKindOfAttr>(S.Context, attr), type, type);
return false;
}
@@ -6480,7 +6555,7 @@ bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
// If not, we can't apply __kindof.
if (!objType) {
// FIXME: Handle dependent types that aren't yet object types.
- Diag(loc, diag::err_objc_kindof_nonobject)
+ S.Diag(attr.getLoc(), diag::err_objc_kindof_nonobject)
<< type;
return true;
}
@@ -6488,45 +6563,31 @@ bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
// Rebuild the "equivalent" type, which pushes __kindof down into
// the object type.
// There is no need to apply kindof on an unqualified id type.
- QualType equivType = Context.getObjCObjectType(
+ QualType equivType = S.Context.getObjCObjectType(
objType->getBaseType(), objType->getTypeArgsAsWritten(),
objType->getProtocols(),
/*isKindOf=*/objType->isObjCUnqualifiedId() ? false : true);
// If we started with an object pointer type, rebuild it.
if (ptrType) {
- equivType = Context.getObjCObjectPointerType(equivType);
- if (auto nullability = type->getNullability(Context)) {
- auto attrKind = AttributedType::getNullabilityAttrKind(*nullability);
- equivType = Context.getAttributedType(attrKind, equivType, equivType);
+ equivType = S.Context.getObjCObjectPointerType(equivType);
+ if (auto nullability = type->getNullability(S.Context)) {
+ // We create a nullability attribute from the __kindof attribute.
+ // Make sure that will make sense.
+ assert(attr.getAttributeSpellingListIndex() == 0 &&
+ "multiple spellings for __kindof?");
+ Attr *A = createNullabilityAttr(S.Context, attr, *nullability);
+ A->setImplicit(true);
+ equivType = state.getAttributedType(A, equivType, equivType);
}
}
// Build the attributed type to record where __kindof occurred.
- type = Context.getAttributedType(AttributedType::attr_objc_kindof,
- type,
- equivType);
-
+ type = state.getAttributedType(
+ createSimpleAttr<ObjCKindOfAttr>(S.Context, attr), type, equivType);
return false;
}
-/// Map a nullability attribute kind to a nullability kind.
-static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
- switch (kind) {
- case ParsedAttr::AT_TypeNonNull:
- return NullabilityKind::NonNull;
-
- case ParsedAttr::AT_TypeNullable:
- return NullabilityKind::Nullable;
-
- case ParsedAttr::AT_TypeNullUnspecified:
- return NullabilityKind::Unspecified;
-
- default:
- llvm_unreachable("not a nullability attribute kind");
- }
-}
-
/// Distribute a nullability type attribute that cannot be applied to
/// the type specifier to a pointer, block pointer, or member pointer
/// declarator, complaining if necessary.
@@ -6614,27 +6675,29 @@ static bool distributeNullabilityTypeAttr(TypeProcessingState &state,
return false;
}
-static AttributedType::Kind getCCTypeAttrKind(ParsedAttr &Attr) {
+static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
assert(!Attr.isInvalid());
switch (Attr.getKind()) {
default:
llvm_unreachable("not a calling convention attribute");
case ParsedAttr::AT_CDecl:
- return AttributedType::attr_cdecl;
+ return createSimpleAttr<CDeclAttr>(Ctx, Attr);
case ParsedAttr::AT_FastCall:
- return AttributedType::attr_fastcall;
+ return createSimpleAttr<FastCallAttr>(Ctx, Attr);
case ParsedAttr::AT_StdCall:
- return AttributedType::attr_stdcall;
+ return createSimpleAttr<StdCallAttr>(Ctx, Attr);
case ParsedAttr::AT_ThisCall:
- return AttributedType::attr_thiscall;
+ return createSimpleAttr<ThisCallAttr>(Ctx, Attr);
case ParsedAttr::AT_RegCall:
- return AttributedType::attr_regcall;
+ return createSimpleAttr<RegCallAttr>(Ctx, Attr);
case ParsedAttr::AT_Pascal:
- return AttributedType::attr_pascal;
+ return createSimpleAttr<PascalAttr>(Ctx, Attr);
case ParsedAttr::AT_SwiftCall:
- return AttributedType::attr_swiftcall;
+ return createSimpleAttr<SwiftCallAttr>(Ctx, Attr);
case ParsedAttr::AT_VectorCall:
- return AttributedType::attr_vectorcall;
+ return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AArch64VectorPcs:
+ return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
@@ -6644,20 +6707,22 @@ static AttributedType::Kind getCCTypeAttrKind(ParsedAttr &Attr) {
Str = cast<StringLiteral>(Attr.getArgAsExpr(0))->getString();
else
Str = Attr.getArgAsIdent(0)->Ident->getName();
- return llvm::StringSwitch<AttributedType::Kind>(Str)
- .Case("aapcs", AttributedType::attr_pcs)
- .Case("aapcs-vfp", AttributedType::attr_pcs_vfp);
+ PcsAttr::PCSType Type;
+ if (!PcsAttr::ConvertStrToPCSType(Str, Type))
+ llvm_unreachable("already validated the attribute");
+ return ::new (Ctx) PcsAttr(Attr.getRange(), Ctx, Type,
+ Attr.getAttributeSpellingListIndex());
}
case ParsedAttr::AT_IntelOclBicc:
- return AttributedType::attr_inteloclbicc;
+ return createSimpleAttr<IntelOclBiccAttr>(Ctx, Attr);
case ParsedAttr::AT_MSABI:
- return AttributedType::attr_ms_abi;
+ return createSimpleAttr<MSABIAttr>(Ctx, Attr);
case ParsedAttr::AT_SysVABI:
- return AttributedType::attr_sysv_abi;
+ return createSimpleAttr<SysVABIAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveMost:
- return AttributedType::attr_preserve_most;
+ return createSimpleAttr<PreserveMostAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveAll:
- return AttributedType::attr_preserve_all;
+ return createSimpleAttr<PreserveAllAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
@@ -6705,8 +6770,9 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
= unwrapped.get()->getExtInfo().withProducesResult(true);
type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
}
- type = S.Context.getAttributedType(AttributedType::attr_ns_returns_retained,
- origType, type);
+ type = state.getAttributedType(
+ createSimpleAttr<NSReturnsRetainedAttr>(S.Context, attr),
+ origType, type);
return true;
}
@@ -6781,13 +6847,12 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
const FunctionType *fn = unwrapped.get();
CallingConv CCOld = fn->getCallConv();
- AttributedType::Kind CCAttrKind = getCCTypeAttrKind(attr);
+ Attr *CCAttr = getCCTypeAttr(S.Context, attr);
if (CCOld != CC) {
// Error out on when there's already an attribute on the type
// and the CCs don't match.
- const AttributedType *AT = S.getCallingConvAttributedType(type);
- if (AT && AT->getAttrKind() != CCAttrKind) {
+ if (S.getCallingConvAttributedType(type)) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
<< FunctionType::getNameForCallConv(CC)
<< FunctionType::getNameForCallConv(CCOld);
@@ -6841,7 +6906,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
Equivalent =
unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
}
- type = S.Context.getAttributedType(CCAttrKind, type, Equivalent);
+ type = state.getAttributedType(CCAttr, type, Equivalent);
return true;
}
@@ -6906,8 +6971,8 @@ static void HandleVectorSizeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
+ << 1;
Attr.setInvalid();
return;
}
@@ -6943,8 +7008,8 @@ static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
+ << 1;
return;
}
@@ -7032,14 +7097,14 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S, VectorType::VectorKind VecKind) {
// Target must have NEON
if (!S.Context.getTargetInfo().hasFeature("neon")) {
- S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr.getName();
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr;
Attr.setInvalid();
return;
}
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
- << Attr.getName() << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
+ << 1;
Attr.setInvalid();
return;
}
@@ -7049,8 +7114,8 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
!numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentIntegerConstant
- << numEltsExpr->getSourceRange();
+ << Attr << AANT_ArgumentIntegerConstant
+ << numEltsExpr->getSourceRange();
Attr.setInvalid();
return;
}
@@ -7085,22 +7150,43 @@ static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
}
if (const TypedefType* TypedefTy = CurType->getAs<TypedefType>()) {
- QualType PointeeTy = TypedefTy->desugar();
- S.Diag(Attr.getLoc(), diag::err_opencl_multiple_access_qualifiers);
+ QualType BaseTy = TypedefTy->desugar();
std::string PrevAccessQual;
- switch (cast<BuiltinType>(PointeeTy.getTypePtr())->getKind()) {
- #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id: \
- PrevAccessQual = #Access; \
- break;
- #include "clang/Basic/OpenCLImageTypes.def"
- default:
- assert(0 && "Unable to find corresponding image type.");
+ if (BaseTy->isPipeType()) {
+ if (TypedefTy->getDecl()->hasAttr<OpenCLAccessAttr>()) {
+ OpenCLAccessAttr *Attr =
+ TypedefTy->getDecl()->getAttr<OpenCLAccessAttr>();
+ PrevAccessQual = Attr->getSpelling();
+ } else {
+ PrevAccessQual = "read_only";
+ }
+ } else if (const BuiltinType* ImgType = BaseTy->getAs<BuiltinType>()) {
+
+ switch (ImgType->getKind()) {
+ #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ PrevAccessQual = #Access; \
+ break;
+ #include "clang/Basic/OpenCLImageTypes.def"
+ default:
+ llvm_unreachable("Unable to find corresponding image type.");
+ }
+ } else {
+ llvm_unreachable("unexpected type");
+ }
+ StringRef AttrName = Attr.getName()->getName();
+ if (PrevAccessQual == AttrName.ltrim("_")) {
+ // Duplicated qualifiers
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_declspec)
+ << AttrName << Attr.getRange();
+ } else {
+ // Contradicting qualifiers
+ S.Diag(Attr.getLoc(), diag::err_opencl_multiple_access_qualifiers);
}
- S.Diag(TypedefTy->getDecl()->getLocStart(),
- diag::note_opencl_typedef_access_qualifier) << PrevAccessQual;
+ S.Diag(TypedefTy->getDecl()->getBeginLoc(),
+ diag::note_opencl_typedef_access_qualifier) << PrevAccessQual;
} else if (CurType->isPipeType()) {
if (Attr.getSemanticSpelling() == OpenCLAccessAttr::Keyword_write_only) {
QualType ElemType = CurType->getAs<PipeType>()->getElementType();
@@ -7136,7 +7222,8 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
bool IsPointee =
ChunkIndex > 0 &&
(D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Pointer ||
- D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::BlockPointer);
+ D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::BlockPointer ||
+ D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Reference);
bool IsFuncReturnType =
ChunkIndex > 0 &&
D.getTypeObject(ChunkIndex - 1).Kind == DeclaratorChunk::Function;
@@ -7156,10 +7243,13 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
!IsPointee) ||
// Do not deduce addr space of the void type, e.g. in f(void), otherwise
// it will fail some sema check.
- (T->isVoidType() && !IsPointee))
+ (T->isVoidType() && !IsPointee) ||
+ // Do not deduce address spaces for dependent types because they might end
+ // up instantiating to a type with an explicit address space qualifier.
+ T->isDependentType())
return;
- LangAS ImpAddr;
+ LangAS ImpAddr = LangAS::Default;
// Put OpenCL automatic variable in private address space.
// OpenCL v1.2 s6.5:
// The default address space name for arguments to a function in a
@@ -7181,7 +7271,9 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
if (IsPointee) {
ImpAddr = LangAS::opencl_generic;
} else {
- if (D.getContext() == DeclaratorContext::FileContext) {
+ if (D.getContext() == DeclaratorContext::TemplateArgContext) {
+ // Do not deduce address space for non-pointee type in template arg.
+ } else if (D.getContext() == DeclaratorContext::FileContext) {
ImpAddr = LangAS::opencl_global;
} else {
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
@@ -7196,14 +7288,15 @@ static void deduceOpenCLImplicitAddrSpace(TypeProcessingState &State,
T = State.getSema().Context.getAddrSpaceQualType(T, ImpAddr);
}
-static void HandleLifetimeBoundAttr(QualType &CurType,
- const ParsedAttr &Attr,
- Sema &S, Declarator &D) {
- if (D.isDeclarationOfFunction()) {
- CurType = S.Context.getAttributedType(AttributedType::attr_lifetimebound,
- CurType, CurType);
+static void HandleLifetimeBoundAttr(TypeProcessingState &State,
+ QualType &CurType,
+ ParsedAttr &Attr) {
+ if (State.getDeclarator().isDeclarationOfFunction()) {
+ CurType = State.getAttributedType(
+ createSimpleAttr<LifetimeBoundAttr>(State.getSema().Context, Attr),
+ CurType, CurType);
} else {
- Attr.diagnoseAppertainsTo(S, nullptr);
+ Attr.diagnoseAppertainsTo(State.getSema(), nullptr);
}
}
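What the attribute handled above means at the source level (an illustrative example, not from the patch; the function name is invented, and the warning requires a sufficiently recent clang):

  // [[clang::lifetimebound]] marks a parameter whose referent the return value
  // may point into; binding the result to a longer-lived reference when the
  // argument is a temporary can then be diagnosed.
  const int &smaller(const int &a [[clang::lifetimebound]],
                     const int &b [[clang::lifetimebound]]) {
    return a < b ? a : b;
  }

  int main() {
    const int &r = smaller(1, 2);  // warning: the temporaries die here, so 'r' dangles
    return r;
  }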
@@ -7220,6 +7313,9 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// sure we visit every element once. Copy the attributes list, and iterate
// over that.
ParsedAttributesView AttrsCopy{attrs};
+
+ state.setParsedNoDeref(false);
+
for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
@@ -7231,7 +7327,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
// incompatibility.
- if (attr.getScopeName() && attr.getScopeName()->isStr("gnu")) {
+ if (attr.isGNUScope()) {
bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
@@ -7256,7 +7352,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// A C++11 attribute on a declarator chunk must appertain to a type.
if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
- << attr.getName();
+ << attr;
attr.setUsedAsTypeAttr();
}
break;
@@ -7282,7 +7378,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case ParsedAttr::AT_OpenCLConstantAddressSpace:
case ParsedAttr::AT_OpenCLGenericAddressSpace:
case ParsedAttr::AT_AddressSpace:
- HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
+ HandleAddressSpaceTypeAttribute(type, attr, state);
attr.setUsedAsTypeAttr();
break;
OBJC_POINTER_TYPE_ATTRS_CASELIST:
@@ -7313,12 +7409,18 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_LifetimeBound:
- if (TAL == TAL_DeclChunk) {
- HandleLifetimeBoundAttr(type, attr, state.getSema(),
- state.getDeclarator());
- attr.setUsedAsTypeAttr();
- }
+ if (TAL == TAL_DeclChunk)
+ HandleLifetimeBoundAttr(state, type, attr);
+ break;
+
+ case ParsedAttr::AT_NoDeref: {
+ ASTContext &Ctx = state.getSema().Context;
+ type = state.getAttributedType(createSimpleAttr<NoDerefAttr>(Ctx, attr),
+ type, type);
+ attr.setUsedAsTypeAttr();
+ state.setParsedNoDeref(true);
break;
+ }
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
@@ -7341,11 +7443,10 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
bool allowOnArrayType =
state.getDeclarator().isPrototypeContext() &&
!hasOuterPointerLikeChunk(state.getDeclarator(), endIndex);
- if (state.getSema().checkNullabilityTypeSpecifier(
+ if (checkNullabilityTypeSpecifier(
+ state,
type,
- mapNullabilityAttrKind(attr.getKind()),
- attr.getLoc(),
- attr.isContextSensitiveKeywordAttribute(),
+ attr,
allowOnArrayType)) {
attr.setInvalid();
}
@@ -7364,16 +7465,16 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case TAL_DeclName:
state.getSema().Diag(attr.getLoc(),
diag::err_objc_kindof_wrong_position)
- << FixItHint::CreateRemoval(attr.getLoc())
- << FixItHint::CreateInsertion(
- state.getDeclarator().getDeclSpec().getLocStart(), "__kindof ");
+ << FixItHint::CreateRemoval(attr.getLoc())
+ << FixItHint::CreateInsertion(
+ state.getDeclarator().getDeclSpec().getBeginLoc(),
+ "__kindof ");
break;
}
// Apply it regardless.
- if (state.getSema().checkObjCKindOfType(type, attr.getLoc()))
+ if (checkObjCKindOfType(state, type, attr))
attr.setInvalid();
- attr.setUsedAsTypeAttr();
break;
FUNCTION_TYPE_ATTRS_CASELIST:
@@ -7577,14 +7678,35 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
assert(D && "missing definition for pattern of instantiated definition");
*Suggested = D;
- if (isVisible(D))
+
+ auto DefinitionIsVisible = [&] {
+ // The (primary) definition might be in a visible module.
+ if (isVisible(D))
+ return true;
+
+ // A visible module might have a merged definition instead.
+ if (D->isModulePrivate() ? hasMergedDefinitionInCurrentModule(D)
+ : hasVisibleMergedDefinition(D)) {
+ if (CodeSynthesisContexts.empty() &&
+ !getLangOpts().ModulesLocalVisibility) {
+ // Cache the fact that this definition is implicitly visible because
+ // there is a visible merged definition.
+ D->setVisibleDespiteOwningModule();
+ }
+ return true;
+ }
+
+ return false;
+ };
+
+ if (DefinitionIsVisible())
return true;
// The external source may have additional definitions of this entity that are
// visible, so complete the redeclaration chain now and ask again.
if (auto *Source = Context.getExternalSource()) {
Source->CompleteRedeclChain(D);
- return isVisible(D);
+ return DefinitionIsVisible();
}
return false;
@@ -7684,39 +7806,24 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
return false;
}
- const TagType *Tag = T->getAs<TagType>();
- const ObjCInterfaceType *IFace = T->getAs<ObjCInterfaceType>();
+ TagDecl *Tag = dyn_cast_or_null<TagDecl>(Def);
+ ObjCInterfaceDecl *IFace = dyn_cast_or_null<ObjCInterfaceDecl>(Def);
- // If there's an unimported definition of this type in a module (for
- // instance, because we forward declared it, then imported the definition),
- // import that definition now.
- //
- // FIXME: What about other cases where an import extends a redeclaration
- // chain for a declaration that can be accessed through a mechanism other
- // than name lookup (eg, referenced in a template, or a variable whose type
- // could be completed by the module)?
- //
- // FIXME: Should we map through to the base array element type before
- // checking for a tag type?
+ // Give the external source a chance to provide a definition of the type.
+ // This is kept separate from completing the redeclaration chain so that
+ // external sources such as LLDB can avoid synthesizing a type definition
+ // unless it's actually needed.
if (Tag || IFace) {
- NamedDecl *D =
- Tag ? static_cast<NamedDecl *>(Tag->getDecl()) : IFace->getDecl();
-
// Avoid diagnosing invalid decls as incomplete.
- if (D->isInvalidDecl())
+ if (Def->isInvalidDecl())
return true;
// Give the external AST source a chance to complete the type.
if (auto *Source = Context.getExternalSource()) {
- if (Tag) {
- TagDecl *TagD = Tag->getDecl();
- if (TagD->hasExternalLexicalStorage())
- Source->CompleteType(TagD);
- } else {
- ObjCInterfaceDecl *IFaceD = IFace->getDecl();
- if (IFaceD->hasExternalLexicalStorage())
- Source->CompleteType(IFace->getDecl());
- }
+ if (Tag && Tag->hasExternalLexicalStorage())
+ Source->CompleteType(Tag);
+ if (IFace && IFace->hasExternalLexicalStorage())
+ Source->CompleteType(IFace);
// If the external source completed the type, go through the motions
// again to ensure we're allowed to use the completed type.
if (!T->isIncompleteType())
@@ -7727,32 +7834,31 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If we have a class template specialization or a class member of a
// class template specialization, or an array with known size of such,
// try to instantiate it.
- QualType MaybeTemplate = T;
- while (const ConstantArrayType *Array
- = Context.getAsConstantArrayType(MaybeTemplate))
- MaybeTemplate = Array->getElementType();
- if (const RecordType *Record = MaybeTemplate->getAs<RecordType>()) {
+ if (auto *RD = dyn_cast_or_null<CXXRecordDecl>(Tag)) {
bool Instantiated = false;
bool Diagnosed = false;
- if (ClassTemplateSpecializationDecl *ClassTemplateSpec
- = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (RD->isDependentContext()) {
+ // Don't try to instantiate a dependent class (eg, a member template of
+ // an instantiated class template specialization).
+ // FIXME: Can this ever happen?
+ } else if (auto *ClassTemplateSpec =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
Diagnosed = InstantiateClassTemplateSpecialization(
Loc, ClassTemplateSpec, TSK_ImplicitInstantiation,
/*Complain=*/Diagnoser);
Instantiated = true;
}
- } else if (CXXRecordDecl *Rec
- = dyn_cast<CXXRecordDecl>(Record->getDecl())) {
- CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
- if (!Rec->isBeingDefined() && Pattern) {
- MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo();
+ } else {
+ CXXRecordDecl *Pattern = RD->getInstantiatedFromMemberClass();
+ if (!RD->isBeingDefined() && Pattern) {
+ MemberSpecializationInfo *MSI = RD->getMemberSpecializationInfo();
assert(MSI && "Missing member specialization information?");
// This record was instantiated from a class within a template.
if (MSI->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization) {
- Diagnosed = InstantiateClass(Loc, Rec, Pattern,
- getTemplateInstantiationArgs(Rec),
+ Diagnosed = InstantiateClass(Loc, RD, Pattern,
+ getTemplateInstantiationArgs(RD),
TSK_ImplicitInstantiation,
/*Complain=*/Diagnoser);
Instantiated = true;
@@ -7783,15 +7889,15 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// If the type was a forward declaration of a class/struct/union
// type, produce a note.
- if (Tag && !Tag->getDecl()->isInvalidDecl())
- Diag(Tag->getDecl()->getLocation(),
+ if (Tag && !Tag->isInvalidDecl())
+ Diag(Tag->getLocation(),
Tag->isBeingDefined() ? diag::note_type_being_defined
: diag::note_forward_declaration)
- << QualType(Tag, 0);
+ << Context.getTagDeclType(Tag);
// If the Objective-C class was a forward declaration, produce a note.
- if (IFace && !IFace->getDecl()->isInvalidDecl())
- Diag(IFace->getDecl()->getLocation(), diag::note_forward_class);
+ if (IFace && !IFace->isInvalidDecl())
+ Diag(IFace->getLocation(), diag::note_forward_class);
// If we have external information that we can use to suggest a fix,
// produce a note.
@@ -7880,7 +7986,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
<< getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases();
for (const auto &I : RD->vbases())
- Diag(I.getLocStart(), diag::note_constexpr_virtual_base_here)
+ Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here)
<< I.getSourceRange();
} else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
!RD->hasTrivialDefaultConstructor()) {
@@ -7888,9 +7994,8 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
} else if (RD->hasNonLiteralTypeFieldsOrBases()) {
for (const auto &I : RD->bases()) {
if (!I.getType()->isLiteralType(Context)) {
- Diag(I.getLocStart(),
- diag::note_non_literal_base_class)
- << RD << I.getType() << I.getSourceRange();
+ Diag(I.getBeginLoc(), diag::note_non_literal_base_class)
+ << RD << I.getType() << I.getSourceRange();
return true;
}
}
@@ -7947,9 +8052,7 @@ QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
}
QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
- ExprResult ER = CheckPlaceholderExpr(E);
- if (ER.isInvalid()) return QualType();
- E = ER.get();
+ assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (!getLangOpts().CPlusPlus && E->refersToBitField())
Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2;
@@ -8034,9 +8137,7 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated) {
- ExprResult ER = CheckPlaceholderExpr(E);
- if (ER.isInvalid()) return QualType();
- E = ER.get();
+ assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (AsUnevaluated && CodeSynthesisContexts.empty() &&
E->HasSideEffects(Context, false)) {
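Taken together, the SemaType.cpp hunks above replace the old AttributedType::Kind plumbing with semantic Attr nodes that TypeProcessingState remembers and the type-loc fillers later consume. A minimal standalone model of that bookkeeping, using simplified stand-in types rather than the real Clang classes:

  #include <cassert>
  #include <map>
  #include <memory>
  #include <vector>

  struct Attr {};            // stand-in for clang::Attr
  struct AttributedType {};  // stand-in for clang::AttributedType

  class TypeProcessingState {
    // Maps each AttributedType created while processing a declarator to the
    // semantic attribute that produced it.
    std::map<const AttributedType *, Attr *> AttrsForTypes;
    std::vector<std::unique_ptr<AttributedType>> Owned;  // keeps the sketch leak-free

  public:
    const AttributedType *getAttributedType(Attr *A) {
      Owned.push_back(std::make_unique<AttributedType>());
      const AttributedType *AT = Owned.back().get();
      AttrsForTypes[AT] = A;
      return AT;
    }

    // The TypeLoc filler calls this once per attributed type; the attribute is
    // handed over exactly once instead of being re-derived from a parsed kind.
    Attr *takeAttrForAttributedType(const AttributedType *AT) {
      auto It = AttrsForTypes.find(AT);
      assert(It != AttrsForTypes.end() && "no Attr recorded for this type");
      Attr *Result = It->second;
      AttrsForTypes.erase(It);
      return Result;
    }
  };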
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index de962d775d73..df14768cbe81 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -328,7 +328,7 @@ public:
/// other mechanism.
///
/// \returns the transformed statement.
- StmtResult TransformStmt(Stmt *S);
+ StmtResult TransformStmt(Stmt *S, bool DiscardedValue = false);
/// Transform the given statement.
///
@@ -597,7 +597,7 @@ public:
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals,
+ Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
bool TransformExceptionSpec(SourceLocation Loc,
@@ -684,15 +684,13 @@ public:
OMPClause *Transform ## Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
- /// Build a new qualified type given its unqualified type and type
- /// qualifiers.
+ /// Build a new qualified type given its unqualified type and type location.
///
/// By default, this routine adds type qualifiers only to types that can
/// have qualifiers, and silently suppresses those qualifiers that are not
/// permitted. Subclasses may override this routine to provide different
/// behavior.
- QualType RebuildQualifiedType(QualType T, SourceLocation Loc,
- Qualifiers Quals);
+ QualType RebuildQualifiedType(QualType T, QualifiedTypeLoc TL);
/// Build a new pointer type given its pointee type.
///
@@ -1798,14 +1796,16 @@ public:
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *
- RebuildOMPMapClause(OpenMPMapClauseKind MapTypeModifier,
+ RebuildOMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPMapClause(MapTypeModifier, MapType,
- IsMapTypeImplicit, MapLoc, ColonLoc,
- VarList, StartLoc, LParenLoc, EndLoc);
+ return getSema().ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc,
+ MapType, IsMapTypeImplicit, MapLoc,
+ ColonLoc, VarList, StartLoc,
+ LParenLoc, EndLoc);
}
/// Build a new OpenMP 'num_teams' clause.
@@ -2021,11 +2021,10 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
- SourceLocation CoawaitLoc,
- SourceLocation ColonLoc,
- Stmt *Range, Stmt *Begin, Stmt *End,
- Expr *Cond, Expr *Inc,
- Stmt *LoopVar,
+ SourceLocation CoawaitLoc, Stmt *Init,
+ SourceLocation ColonLoc, Stmt *Range,
+ Stmt *Begin, Stmt *End, Expr *Cond,
+ Expr *Inc, Stmt *LoopVar,
SourceLocation RParenLoc) {
// If we've just learned that the range is actually an Objective-C
// collection, treat this as an Objective-C fast enumeration loop.
@@ -2037,17 +2036,24 @@ public:
Expr *RangeExpr = RangeVar->getInit();
if (!RangeExpr->isTypeDependent() &&
- RangeExpr->getType()->isObjCObjectPointerType())
- return getSema().ActOnObjCForCollectionStmt(ForLoc, LoopVar, RangeExpr,
- RParenLoc);
+ RangeExpr->getType()->isObjCObjectPointerType()) {
+ // FIXME: Support init-statements in Objective-C++20 ranged for
+ // statement.
+ if (Init) {
+ return SemaRef.Diag(Init->getBeginLoc(),
+ diag::err_objc_for_range_init_stmt)
+ << Init->getSourceRange();
+ }
+ return getSema().ActOnObjCForCollectionStmt(ForLoc, LoopVar,
+ RangeExpr, RParenLoc);
+ }
}
}
}
- return getSema().BuildCXXForRangeStmt(ForLoc, CoawaitLoc, ColonLoc,
- Range, Begin, End,
- Cond, Inc, LoopVar, RParenLoc,
- Sema::BFRK_Rebuild);
+ return getSema().BuildCXXForRangeStmt(ForLoc, CoawaitLoc, Init, ColonLoc,
+ Range, Begin, End, Cond, Inc, LoopVar,
+ RParenLoc, Sema::BFRK_Rebuild);
}
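Illustrative sketch, not part of the patch: the new Init parameter corresponds to the C++20 init-statement in a range-based for, which keeps the initialized object alive for the whole loop. The hunk above also rejects this form when the range turns out to be an Objective-C collection (err_objc_for_range_init_stmt).

  #include <vector>

  int sum_squares(int n) {
    int total = 0;
    // C++20 (-std=c++2a): init-statement before the range declaration.
    for (std::vector<int> v(n, 2); int x : v)
      total += x * x;
    return total;
  }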
/// Build a new C++0x range-based for statement.
@@ -2090,8 +2096,8 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentType IT) {
- return getSema().BuildPredefinedExpr(Loc, IT);
+ PredefinedExpr::IdentKind IK) {
+ return getSema().BuildPredefinedExpr(Loc, IK);
}
/// Build a new expression that references a declaration.
@@ -3124,15 +3130,15 @@ public:
// Build a reference to the __builtin_shufflevector builtin
FunctionDecl *Builtin = cast<FunctionDecl>(Lookup.front());
- Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, false,
- SemaRef.Context.BuiltinFnTy,
- VK_RValue, BuiltinLoc);
+ Expr *Callee = new (SemaRef.Context)
+ DeclRefExpr(SemaRef.Context, Builtin, false,
+ SemaRef.Context.BuiltinFnTy, VK_RValue, BuiltinLoc);
QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType());
Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy,
CK_BuiltinFnToFnPtr).get();
// Build the CallExpr
- ExprResult TheCall = new (SemaRef.Context) CallExpr(
+ ExprResult TheCall = CallExpr::Create(
SemaRef.Context, Callee, SubExprs, Builtin->getCallResultType(),
Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc);
@@ -3263,8 +3269,8 @@ private:
bool DeducibleTSTContext);
};
-template<typename Derived>
-StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S, bool DiscardedValue) {
if (!S)
return S;
@@ -3288,7 +3294,7 @@ StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
if (E.isInvalid())
return StmtError();
- return getSema().ActOnExprStmt(E);
+ return getSema().ActOnExprStmt(E, DiscardedValue);
}
}
@@ -3338,8 +3344,8 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
if (!Init)
return Init;
- if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
- Init = ExprTemp->getSubExpr();
+ if (auto *FE = dyn_cast<FullExpr>(Init))
+ Init = FE->getSubExpr();
if (auto *AIL = dyn_cast<ArrayInitLoopExpr>(Init))
Init = AIL->getCommonExpr();
@@ -3386,6 +3392,11 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
if (Construct && Construct->isStdInitListInitialization())
return TransformInitializer(Construct->getArg(0), NotCopyInit);
+ // Enter a list-init context if this was list initialization.
+ EnterExpressionEvaluationContext Context(
+ getSema(), EnterExpressionEvaluationContext::InitList,
+ Construct->isListInitialization());
+
SmallVector<Expr*, 8> NewArgs;
bool ArgChanged = false;
if (getDerived().TransformExprs(Construct->getArgs(), Construct->getNumArgs(),
@@ -3394,8 +3405,8 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
// If this was list initialization, revert to syntactic list form.
if (Construct->isListInitialization())
- return getDerived().RebuildInitList(Construct->getLocStart(), NewArgs,
- Construct->getLocEnd());
+ return getDerived().RebuildInitList(Construct->getBeginLoc(), NewArgs,
+ Construct->getEndLoc());
// Build a ParenListExpr to represent anything else.
SourceRange Parens = Construct->getParenOrBraceRange();
@@ -4217,8 +4228,9 @@ TreeTransform<Derived>::TransformTypeWithDeducedTST(TypeSourceInfo *DI) {
return nullptr;
if (QTL) {
- Result = getDerived().RebuildQualifiedType(
- Result, QTL.getBeginLoc(), QTL.getType().getLocalQualifiers());
+ Result = getDerived().RebuildQualifiedType(Result, QTL);
+ if (Result.isNull())
+ return nullptr;
TLB.TypeWasModifiedSafely(Result);
}
@@ -4229,13 +4241,14 @@ template<typename Derived>
QualType
TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
QualifiedTypeLoc T) {
- Qualifiers Quals = T.getType().getLocalQualifiers();
-
QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
if (Result.isNull())
return QualType();
- Result = getDerived().RebuildQualifiedType(Result, T.getBeginLoc(), Quals);
+ Result = getDerived().RebuildQualifiedType(Result, T);
+
+ if (Result.isNull())
+ return QualType();
// RebuildQualifiedType might have updated the type, but not in a way
// that invalidates the TypeLoc. (There's no location information for
@@ -4245,21 +4258,41 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
return Result;
}
-template<typename Derived>
+template <typename Derived>
QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
- SourceLocation Loc,
- Qualifiers Quals) {
+ QualifiedTypeLoc TL) {
+
+ SourceLocation Loc = TL.getBeginLoc();
+ Qualifiers Quals = TL.getType().getLocalQualifiers();
+
+ if (((T.getAddressSpace() != LangAS::Default &&
+ Quals.getAddressSpace() != LangAS::Default)) &&
+ T.getAddressSpace() != Quals.getAddressSpace()) {
+ SemaRef.Diag(Loc, diag::err_address_space_mismatch_templ_inst)
+ << TL.getType() << T;
+ return QualType();
+ }
+
// C++ [dcl.fct]p7:
// [When] adding cv-qualifications on top of the function type [...] the
// cv-qualifiers are ignored.
+ if (T->isFunctionType()) {
+ T = SemaRef.getASTContext().getAddrSpaceQualType(T,
+ Quals.getAddressSpace());
+ return T;
+ }
+
// C++ [dcl.ref]p1:
// when the cv-qualifiers are introduced through the use of a typedef-name
// or decltype-specifier [...] the cv-qualifiers are ignored.
// Note that [dcl.ref]p1 lists all cases in which cv-qualifiers can be
// applied to a reference type.
- // FIXME: This removes all qualifiers, not just cv-qualifiers!
- if (T->isFunctionType() || T->isReferenceType())
- return T;
+ if (T->isReferenceType()) {
+ // The only qualifier that applies to a reference type is restrict.
+ if (!Quals.hasRestrict())
+ return T;
+ Quals = Qualifiers::fromCVRMask(Qualifiers::Restrict);
+ }
// Suppress Objective-C lifetime qualifiers if they don't make sense for the
// resulting type.
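Illustrative sketch of the language rules quoted above, not part of the patch: cv-qualifiers layered onto reference or function types through a typedef are ignored ([dcl.ref]p1, [dcl.fct]p7), which is why the rewritten RebuildQualifiedType re-applies only the address space to function types and only restrict to reference types.

  typedef int &IntRef;
  typedef void F();

  int g = 0;
  const IntRef r = g;   // 'const' is ignored; r has type int& ([dcl.ref]p1).
  struct S {
    const F f;          // 'const' is ignored; declares void f() ([dcl.fct]p7).
  };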
@@ -4682,7 +4715,8 @@ TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
}
if (SizeResult.isInvalid())
return QualType();
- SizeResult = SemaRef.ActOnFinishFullExpr(SizeResult.get());
+ SizeResult =
+ SemaRef.ActOnFinishFullExpr(SizeResult.get(), /*DiscardedValue*/ false);
if (SizeResult.isInvalid())
return QualType();
@@ -5214,7 +5248,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
SmallVector<QualType, 4> ExceptionStorage;
TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
return getDerived().TransformFunctionProtoType(
- TLB, TL, nullptr, 0,
+ TLB, TL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
return This->TransformExceptionSpec(TL.getBeginLoc(), ESI,
ExceptionStorage, Changed);
@@ -5224,7 +5258,7 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
template<typename Derived> template<typename Fn>
QualType TreeTransform<Derived>::TransformFunctionProtoType(
TypeLocBuilder &TLB, FunctionProtoTypeLoc TL, CXXRecordDecl *ThisContext,
- unsigned ThisTypeQuals, Fn TransformExceptionSpec) {
+ Qualifiers ThisTypeQuals, Fn TransformExceptionSpec) {
// Transform the parameters and return type.
//
@@ -5267,6 +5301,13 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
if (ResultType.isNull())
return QualType();
+ // Return type can not be qualified with an address space.
+ if (ResultType.getAddressSpace() != LangAS::Default) {
+ SemaRef.Diag(TL.getReturnLoc().getBeginLoc(),
+ diag::err_attribute_address_function_type);
+ return QualType();
+ }
+
if (getDerived().TransformFunctionTypeParams(
TL.getBeginLoc(), TL.getParams(),
TL.getTypePtr()->param_type_begin(),
@@ -6058,6 +6099,12 @@ QualType TreeTransform<Derived>::TransformAttributedType(
if (modifiedType.isNull())
return QualType();
+ // oldAttr can be null if we started with a QualType rather than a TypeLoc.
+ const Attr *oldAttr = TL.getAttr();
+ const Attr *newAttr = oldAttr ? getDerived().TransformAttr(oldAttr) : nullptr;
+ if (oldAttr && !newAttr)
+ return QualType();
+
QualType result = TL.getType();
// FIXME: dependent operand expressions?
@@ -6074,26 +6121,20 @@ QualType TreeTransform<Derived>::TransformAttributedType(
// type sugar, and therefore cannot be diagnosed in any other way.
if (auto nullability = oldType->getImmediateNullability()) {
if (!modifiedType->canHaveNullability()) {
- SemaRef.Diag(TL.getAttrNameLoc(), diag::err_nullability_nonpointer)
- << DiagNullabilityKind(*nullability, false) << modifiedType;
+ SemaRef.Diag(TL.getAttr()->getLocation(),
+ diag::err_nullability_nonpointer)
+ << DiagNullabilityKind(*nullability, false) << modifiedType;
return QualType();
}
}
- result = SemaRef.Context.getAttributedType(oldType->getAttrKind(),
+ result = SemaRef.Context.getAttributedType(TL.getAttrKind(),
modifiedType,
equivalentType);
}
AttributedTypeLoc newTL = TLB.push<AttributedTypeLoc>(result);
- newTL.setAttrNameLoc(TL.getAttrNameLoc());
- if (TL.hasAttrOperand())
- newTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
- if (TL.hasAttrExprOperand())
- newTL.setAttrExprOperand(TL.getAttrExprOperand());
- else if (TL.hasAttrEnumOperand())
- newTL.setAttrEnumOperandLoc(TL.getAttrEnumOperandLoc());
-
+ newTL.setAttr(newAttr);
return result;
}
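Illustrative sketch, not part of the patch: the relocated err_nullability_nonpointer diagnostic is the one emitted when a nullability specifier is written on a type that cannot be nullable.

  int *_Nonnull good;   // OK: the specifier applies to a pointer type.
  int _Nonnull bad;     // error: '_Nonnull' cannot be applied to the
                        // non-pointer type 'int' (err_nullability_nonpointer)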
@@ -6411,16 +6452,10 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
if (getDerived().AlwaysRebuild() || AnyChanged) {
// Rebuild the type.
Result = getDerived().RebuildObjCObjectType(
- BaseType,
- TL.getLocStart(),
- TL.getTypeArgsLAngleLoc(),
- NewTypeArgInfos,
- TL.getTypeArgsRAngleLoc(),
- TL.getProtocolLAngleLoc(),
- llvm::makeArrayRef(TL.getTypePtr()->qual_begin(),
- TL.getNumProtocols()),
- TL.getProtocolLocs(),
- TL.getProtocolRAngleLoc());
+ BaseType, TL.getBeginLoc(), TL.getTypeArgsLAngleLoc(), NewTypeArgInfos,
+ TL.getTypeArgsRAngleLoc(), TL.getProtocolLAngleLoc(),
+ llvm::makeArrayRef(TL.getTypePtr()->qual_begin(), TL.getNumProtocols()),
+ TL.getProtocolLocs(), TL.getProtocolRAngleLoc());
if (Result.isNull())
return QualType();
@@ -6486,7 +6521,9 @@ TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool SubStmtChanged = false;
SmallVector<Stmt*, 8> Statements;
for (auto *B : S->body()) {
- StmtResult Result = getDerived().TransformStmt(B);
+ StmtResult Result =
+ getDerived().TransformStmt(B, !IsStmtExpr || B != S->body_back());
+
if (Result.isInvalid()) {
// Immediately fail if this was a DeclStmt, since it's very
// likely that this will cause problems for future statements.
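Illustrative sketch, not part of the patch: with this change every child of the compound statement is transformed as a discarded-value expression except the trailing statement of a GNU statement expression, whose value becomes the value of the whole expression.

  // GNU extension accepted by Clang and GCC.
  int square_next(int a) {
    // '++a;' is a discarded-value statement; 'a * a' supplies the value
    // of the statement expression.
    return ({ ++a; a * a; });
  }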
@@ -6656,7 +6693,7 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
if (Then.isInvalid())
return StmtError();
} else {
- Then = new (getSema().Context) NullStmt(S->getThen()->getLocStart());
+ Then = new (getSema().Context) NullStmt(S->getThen()->getBeginLoc());
}
// Transform the "else" branch.
@@ -6759,6 +6796,9 @@ TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
+ if (getSema().getLangOpts().OpenMP)
+ getSema().startOpenMPLoop();
+
// Transform the initialization statement
StmtResult Init = getDerived().TransformStmt(S->getInit());
if (Init.isInvalid())
@@ -6875,7 +6915,7 @@ TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
if (!getDerived().AlwaysRebuild() && !DeclChanged)
return S;
- return getDerived().RebuildDeclStmt(Decls, S->getStartLoc(), S->getEndLoc());
+ return getDerived().RebuildDeclStmt(Decls, S->getBeginLoc(), S->getEndLoc());
}
template<typename Derived>
@@ -7407,6 +7447,11 @@ StmtResult TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
+ StmtResult Init =
+ S->getInit() ? getDerived().TransformStmt(S->getInit()) : StmtResult();
+ if (Init.isInvalid())
+ return StmtError();
+
StmtResult Range = getDerived().TransformStmt(S->getRangeStmt());
if (Range.isInvalid())
return StmtError();
@@ -7440,6 +7485,7 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
StmtResult NewStmt = S;
if (getDerived().AlwaysRebuild() ||
+ Init.get() != S->getInit() ||
Range.get() != S->getRangeStmt() ||
Begin.get() != S->getBeginStmt() ||
End.get() != S->getEndStmt() ||
@@ -7447,7 +7493,7 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
Inc.get() != S->getInc() ||
LoopVar.get() != S->getLoopVarStmt()) {
NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
- S->getCoawaitLoc(),
+ S->getCoawaitLoc(), Init.get(),
S->getColonLoc(), Range.get(),
Begin.get(), End.get(),
Cond.get(),
@@ -7465,7 +7511,7 @@ TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
// it now so we have a new statement to attach the body to.
if (Body.get() != S->getBody() && NewStmt.get() == S) {
NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
- S->getCoawaitLoc(),
+ S->getCoawaitLoc(), Init.get(),
S->getColonLoc(), Range.get(),
Begin.get(), End.get(),
Cond.get(),
@@ -7708,7 +7754,7 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
return getDerived().RebuildOMPExecutableDirective(
D->getDirectiveKind(), DirName, CancelRegion, TClauses,
- AssociatedStmt.get(), D->getLocStart(), D->getLocEnd());
+ AssociatedStmt.get(), D->getBeginLoc(), D->getEndLoc());
}
template <typename Derived>
@@ -7716,7 +7762,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7727,7 +7773,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_simd, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7738,7 +7784,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7749,7 +7795,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPForSimdDirective(OMPForSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_for_simd, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7760,7 +7806,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPSectionsDirective(OMPSectionsDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_sections, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7771,7 +7817,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPSectionDirective(OMPSectionDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_section, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7782,7 +7828,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPSingleDirective(OMPSingleDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_single, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7793,7 +7839,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPMasterDirective(OMPMasterDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_master, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7803,7 +7849,7 @@ template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPCriticalDirective(OMPCriticalDirective *D) {
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_critical, D->getDirectiveName(), nullptr, D->getLocStart());
+ OMPD_critical, D->getDirectiveName(), nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7814,7 +7860,7 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelForDirective(
OMPParallelForDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7825,7 +7871,7 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelForSimdDirective(
OMPParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for_simd, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7836,7 +7882,7 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_sections, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7847,7 +7893,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPTaskDirective(OMPTaskDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_task, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7858,7 +7904,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTaskyieldDirective(
OMPTaskyieldDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_taskyield, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7869,7 +7915,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPBarrierDirective(OMPBarrierDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_barrier, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7880,7 +7926,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_taskwait, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7891,7 +7937,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTaskgroupDirective(
OMPTaskgroupDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_taskgroup, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7902,7 +7948,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_flush, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7913,7 +7959,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7924,7 +7970,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPAtomicDirective(OMPAtomicDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_atomic, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7935,7 +7981,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7946,7 +7992,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetDataDirective(
OMPTargetDataDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_data, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7957,7 +8003,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_enter_data, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7968,7 +8014,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetExitDataDirective(
OMPTargetExitDataDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_exit_data, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7979,7 +8025,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetParallelDirective(
OMPTargetParallelDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -7990,7 +8036,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_for, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8001,7 +8047,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetUpdateDirective(
OMPTargetUpdateDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_update, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8012,7 +8058,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_teams, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8023,7 +8069,7 @@ StmtResult TreeTransform<Derived>::TransformOMPCancellationPointDirective(
OMPCancellationPointDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_cancellation_point, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8034,7 +8080,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPCancelDirective(OMPCancelDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_cancel, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8045,7 +8091,7 @@ StmtResult
TreeTransform<Derived>::TransformOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8056,7 +8102,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTaskLoopSimdDirective(
OMPTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_taskloop_simd, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8067,7 +8113,7 @@ StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
OMPDistributeDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8078,7 +8124,7 @@ StmtResult TreeTransform<Derived>::TransformOMPDistributeParallelForDirective(
OMPDistributeParallelForDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_distribute_parallel_for, DirName, nullptr, D->getLocStart());
+ OMPD_distribute_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8090,7 +8136,7 @@ TreeTransform<Derived>::TransformOMPDistributeParallelForSimdDirective(
OMPDistributeParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_distribute_parallel_for_simd, DirName, nullptr, D->getLocStart());
+ OMPD_distribute_parallel_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8101,7 +8147,7 @@ StmtResult TreeTransform<Derived>::TransformOMPDistributeSimdDirective(
OMPDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_distribute_simd, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8111,9 +8157,8 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetParallelForSimdDirective(
OMPTargetParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_for_simd,
- DirName, nullptr,
- D->getLocStart());
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_target_parallel_for_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8124,7 +8169,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetSimdDirective(
OMPTargetSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_simd, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8135,7 +8180,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeDirective(
OMPTeamsDistributeDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_distribute, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8146,7 +8191,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeSimdDirective(
OMPTeamsDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_teams_distribute_simd, DirName, nullptr, D->getLocStart());
+ OMPD_teams_distribute_simd, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8157,7 +8202,8 @@ StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForSimdDir
OMPTeamsDistributeParallelForSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_teams_distribute_parallel_for_simd, DirName, nullptr, D->getLocStart());
+ OMPD_teams_distribute_parallel_for_simd, DirName, nullptr,
+ D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8167,8 +8213,8 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForDirective(
OMPTeamsDistributeParallelForDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_distribute_parallel_for,
- DirName, nullptr, D->getLocStart());
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_teams_distribute_parallel_for, DirName, nullptr, D->getBeginLoc());
StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8179,7 +8225,7 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDirective(
OMPTargetTeamsDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams, DirName,
- nullptr, D->getLocStart());
+ nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8189,8 +8235,8 @@ template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDistributeDirective(
OMPTargetTeamsDistributeDirective *D) {
DeclarationNameInfo DirName;
- getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams_distribute,
- DirName, nullptr, D->getLocStart());
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_target_teams_distribute, DirName, nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8203,7 +8249,7 @@ TreeTransform<Derived>::TransformOMPTargetTeamsDistributeParallelForDirective(
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
OMPD_target_teams_distribute_parallel_for, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8216,7 +8262,7 @@ StmtResult TreeTransform<Derived>::
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
OMPD_target_teams_distribute_parallel_for_simd, DirName, nullptr,
- D->getLocStart());
+ D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8228,7 +8274,7 @@ TreeTransform<Derived>::TransformOMPTargetTeamsDistributeSimdDirective(
OMPTargetTeamsDistributeSimdDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(
- OMPD_target_teams_distribute_simd, DirName, nullptr, D->getLocStart());
+ OMPD_target_teams_distribute_simd, DirName, nullptr, D->getBeginLoc());
auto Res = getDerived().TransformOMPExecutableDirective(D);
getDerived().getSema().EndOpenMPDSABlock(Res.get());
return Res;
@@ -8244,8 +8290,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPIfClause(OMPIfClause *C) {
if (Cond.isInvalid())
return nullptr;
return getDerived().RebuildOMPIfClause(
- C->getNameModifier(), Cond.get(), C->getLocStart(), C->getLParenLoc(),
- C->getNameModifierLoc(), C->getColonLoc(), C->getLocEnd());
+ C->getNameModifier(), Cond.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getNameModifierLoc(), C->getColonLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8253,8 +8299,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPFinalClause(OMPFinalClause *C) {
ExprResult Cond = getDerived().TransformExpr(C->getCondition());
if (Cond.isInvalid())
return nullptr;
- return getDerived().RebuildOMPFinalClause(Cond.get(), C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPFinalClause(Cond.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8264,7 +8310,7 @@ TreeTransform<Derived>::TransformOMPNumThreadsClause(OMPNumThreadsClause *C) {
if (NumThreads.isInvalid())
return nullptr;
return getDerived().RebuildOMPNumThreadsClause(
- NumThreads.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ NumThreads.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8274,7 +8320,7 @@ TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPSafelenClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8284,7 +8330,7 @@ TreeTransform<Derived>::TransformOMPSimdlenClause(OMPSimdlenClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPSimdlenClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8294,23 +8340,23 @@ TreeTransform<Derived>::TransformOMPCollapseClause(OMPCollapseClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPCollapseClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPDefaultClause(OMPDefaultClause *C) {
return getDerived().RebuildOMPDefaultClause(
- C->getDefaultKind(), C->getDefaultKindKwLoc(), C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ C->getDefaultKind(), C->getDefaultKindKwLoc(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPProcBindClause(OMPProcBindClause *C) {
return getDerived().RebuildOMPProcBindClause(
- C->getProcBindKind(), C->getProcBindKindKwLoc(), C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ C->getProcBindKind(), C->getProcBindKindKwLoc(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8321,9 +8367,9 @@ TreeTransform<Derived>::TransformOMPScheduleClause(OMPScheduleClause *C) {
return nullptr;
return getDerived().RebuildOMPScheduleClause(
C->getFirstScheduleModifier(), C->getSecondScheduleModifier(),
- C->getScheduleKind(), E.get(), C->getLocStart(), C->getLParenLoc(),
+ C->getScheduleKind(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
C->getFirstScheduleModifierLoc(), C->getSecondScheduleModifierLoc(),
- C->getScheduleKindLoc(), C->getCommaLoc(), C->getLocEnd());
+ C->getScheduleKindLoc(), C->getCommaLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8335,7 +8381,7 @@ TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) {
if (E.isInvalid())
return nullptr;
}
- return getDerived().RebuildOMPOrderedClause(C->getLocStart(), C->getLocEnd(),
+ return getDerived().RebuildOMPOrderedClause(C->getBeginLoc(), C->getEndLoc(),
C->getLParenLoc(), E.get());
}
@@ -8414,6 +8460,39 @@ TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUnifiedAddressClause(
+ OMPUnifiedAddressClause *C) {
+ llvm_unreachable("unified_address clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *C) {
+ llvm_unreachable(
+ "unified_shared_memory clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPReverseOffloadClause(
+ OMPReverseOffloadClause *C) {
+ llvm_unreachable("reverse_offload clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPDynamicAllocatorsClause(
+ OMPDynamicAllocatorsClause *C) {
+ llvm_unreachable(
+ "dynamic_allocators clause cannot appear in dependent context");
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ llvm_unreachable(
+ "atomic_default_mem_order clause cannot appear in dependent context");
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -8425,7 +8504,7 @@ TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPPrivateClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8440,7 +8519,7 @@ OMPClause *TreeTransform<Derived>::TransformOMPFirstprivateClause(
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPFirstprivateClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8455,7 +8534,7 @@ TreeTransform<Derived>::TransformOMPLastprivateClause(OMPLastprivateClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPLastprivateClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8469,8 +8548,8 @@ TreeTransform<Derived>::TransformOMPSharedClause(OMPSharedClause *C) {
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPSharedClause(Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPSharedClause(Vars, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8516,8 +8595,8 @@ TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) {
UnresolvedReductions.push_back(nullptr);
}
return getDerived().RebuildOMPReductionClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getColonLoc(),
- C->getLocEnd(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
+ C->getEndLoc(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
}
template <typename Derived>
@@ -8561,8 +8640,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPTaskReductionClause(
UnresolvedReductions.push_back(nullptr);
}
return getDerived().RebuildOMPTaskReductionClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getColonLoc(),
- C->getLocEnd(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
+ C->getEndLoc(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
}
template <typename Derived>
@@ -8606,8 +8685,8 @@ TreeTransform<Derived>::TransformOMPInReductionClause(OMPInReductionClause *C) {
UnresolvedReductions.push_back(nullptr);
}
return getDerived().RebuildOMPInReductionClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getColonLoc(),
- C->getLocEnd(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getColonLoc(),
+ C->getEndLoc(), ReductionIdScopeSpec, NameInfo, UnresolvedReductions);
}
template <typename Derived>
@@ -8625,8 +8704,8 @@ TreeTransform<Derived>::TransformOMPLinearClause(OMPLinearClause *C) {
if (Step.isInvalid())
return nullptr;
return getDerived().RebuildOMPLinearClause(
- Vars, Step.get(), C->getLocStart(), C->getLParenLoc(), C->getModifier(),
- C->getModifierLoc(), C->getColonLoc(), C->getLocEnd());
+ Vars, Step.get(), C->getBeginLoc(), C->getLParenLoc(), C->getModifier(),
+ C->getModifierLoc(), C->getColonLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8644,8 +8723,8 @@ TreeTransform<Derived>::TransformOMPAlignedClause(OMPAlignedClause *C) {
if (Alignment.isInvalid())
return nullptr;
return getDerived().RebuildOMPAlignedClause(
- Vars, Alignment.get(), C->getLocStart(), C->getLParenLoc(),
- C->getColonLoc(), C->getLocEnd());
+ Vars, Alignment.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getColonLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8659,8 +8738,8 @@ TreeTransform<Derived>::TransformOMPCopyinClause(OMPCopyinClause *C) {
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPCopyinClause(Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPCopyinClause(Vars, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8675,7 +8754,7 @@ TreeTransform<Derived>::TransformOMPCopyprivateClause(OMPCopyprivateClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPCopyprivateClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8688,8 +8767,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) {
return nullptr;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPFlushClause(Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPFlushClause(Vars, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8705,7 +8784,7 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
}
return getDerived().RebuildOMPDependClause(
C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(), Vars,
- C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8714,8 +8793,8 @@ TreeTransform<Derived>::TransformOMPDeviceClause(OMPDeviceClause *C) {
ExprResult E = getDerived().TransformExpr(C->getDevice());
if (E.isInvalid())
return nullptr;
- return getDerived().RebuildOMPDeviceClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPDeviceClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8729,9 +8808,9 @@ OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPMapClause(
- C->getMapTypeModifier(), C->getMapType(), C->isImplicitMapType(),
- C->getMapLoc(), C->getColonLoc(), Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(), C->getMapType(),
+ C->isImplicitMapType(), C->getMapLoc(), C->getColonLoc(), Vars,
+ C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8741,7 +8820,7 @@ TreeTransform<Derived>::TransformOMPNumTeamsClause(OMPNumTeamsClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPNumTeamsClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8751,7 +8830,7 @@ TreeTransform<Derived>::TransformOMPThreadLimitClause(OMPThreadLimitClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPThreadLimitClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8761,7 +8840,7 @@ TreeTransform<Derived>::TransformOMPPriorityClause(OMPPriorityClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPPriorityClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8771,7 +8850,7 @@ TreeTransform<Derived>::TransformOMPGrainsizeClause(OMPGrainsizeClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPGrainsizeClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8781,7 +8860,7 @@ TreeTransform<Derived>::TransformOMPNumTasksClause(OMPNumTasksClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPNumTasksClause(
- E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8789,8 +8868,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPHintClause(OMPHintClause *C) {
ExprResult E = getDerived().TransformExpr(C->getHint());
if (E.isInvalid())
return nullptr;
- return getDerived().RebuildOMPHintClause(E.get(), C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPHintClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8800,8 +8879,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPDistScheduleClause(
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPDistScheduleClause(
- C->getDistScheduleKind(), E.get(), C->getLocStart(), C->getLParenLoc(),
- C->getDistScheduleKindLoc(), C->getCommaLoc(), C->getLocEnd());
+ C->getDistScheduleKind(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getDistScheduleKindLoc(), C->getCommaLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8820,8 +8899,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPToClause(OMPToClause *C) {
return 0;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPToClause(Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPToClause(Vars, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8834,8 +8913,8 @@ OMPClause *TreeTransform<Derived>::TransformOMPFromClause(OMPFromClause *C) {
return 0;
Vars.push_back(EVar.get());
}
- return getDerived().RebuildOMPFromClause(Vars, C->getLocStart(),
- C->getLParenLoc(), C->getLocEnd());
+ return getDerived().RebuildOMPFromClause(Vars, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8850,7 +8929,7 @@ OMPClause *TreeTransform<Derived>::TransformOMPUseDevicePtrClause(
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPUseDevicePtrClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -8865,7 +8944,7 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPIsDevicePtrClause(
- Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
+ Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
//===----------------------------------------------------------------------===//
@@ -8873,12 +8952,18 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
//===----------------------------------------------------------------------===//
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformConstantExpr(ConstantExpr *E) {
+ return TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
if (!E->isTypeDependent())
return E;
return getDerived().RebuildPredefinedExpr(E->getLocation(),
- E->getIdentType());
+ E->getIdentKind());
}
template<typename Derived>
@@ -8973,7 +9058,7 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
if (FunctionDecl *FD = E->getDirectCallee())
- SemaRef.MarkFunctionReferenced(E->getLocStart(), FD);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), FD);
return SemaRef.MaybeBindToTemporary(E);
}
@@ -9227,10 +9312,9 @@ TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
RHS.get() == E->getRHS())
return E;
- return getDerived().RebuildArraySubscriptExpr(LHS.get(),
- /*FIXME:*/E->getLHS()->getLocStart(),
- RHS.get(),
- E->getRBracketLoc());
+ return getDerived().RebuildArraySubscriptExpr(
+ LHS.get(),
+ /*FIXME:*/ E->getLHS()->getBeginLoc(), RHS.get(), E->getRBracketLoc());
}
template <typename Derived>
@@ -9259,7 +9343,7 @@ TreeTransform<Derived>::TransformOMPArraySectionExpr(OMPArraySectionExpr *E) {
return E;
return getDerived().RebuildOMPArraySectionExpr(
- Base.get(), E->getBase()->getLocEnd(), LowerBound.get(), E->getColonLoc(),
+ Base.get(), E->getBase()->getEndLoc(), LowerBound.get(), E->getColonLoc(),
Length.get(), E->getRBracketLoc());
}
@@ -9512,9 +9596,9 @@ TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) {
// type-as-written, but that's okay, because it should always be
// derivable from the initializer.
- return getDerived().RebuildCompoundLiteralExpr(E->getLParenLoc(), NewT,
- /*FIXME:*/E->getInitializer()->getLocEnd(),
- Init.get());
+ return getDerived().RebuildCompoundLiteralExpr(
+ E->getLParenLoc(), NewT,
+ /*FIXME:*/ E->getInitializer()->getEndLoc(), Init.get());
}
template<typename Derived>
@@ -9530,7 +9614,7 @@ TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) {
// FIXME: Bad source location
SourceLocation FakeOperatorLoc =
- SemaRef.getLocForEndOfToken(E->getBase()->getLocEnd());
+ SemaRef.getLocForEndOfToken(E->getBase()->getEndLoc());
return getDerived().RebuildExtVectorElementExpr(Base.get(), FakeOperatorLoc,
E->getAccessorLoc(),
E->getAccessor());
@@ -9544,6 +9628,9 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
bool InitChanged = false;
+ EnterExpressionEvaluationContext Context(
+ getSema(), EnterExpressionEvaluationContext::InitList);
+
SmallVector<Expr*, 4> Inits;
if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
Inits, &InitChanged))
@@ -9676,7 +9763,7 @@ template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformImplicitValueInitExpr(
ImplicitValueInitExpr *E) {
- TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
+ TemporaryBase Rebase(*this, E->getBeginLoc(), DeclarationName());
// FIXME: Will we ever have proper type location here? Will we actually
// need to transform the type?
@@ -9818,7 +9905,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
// FIXME: Poor location information
SourceLocation FakeLParenLoc = SemaRef.getLocForEndOfToken(
- static_cast<Expr *>(Object.get())->getLocEnd());
+ static_cast<Expr *>(Object.get())->getEndLoc());
// Transform the call arguments.
SmallVector<Expr*, 8> Args;
@@ -9826,9 +9913,8 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Args))
return ExprError();
- return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc,
- Args,
- E->getLocEnd());
+ return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc, Args,
+ E->getEndLoc());
}
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
@@ -10008,10 +10094,8 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
TInfo == E->getTypeOperandSourceInfo())
return E;
- return getDerived().RebuildCXXTypeidExpr(E->getType(),
- E->getLocStart(),
- TInfo,
- E->getLocEnd());
+ return getDerived().RebuildCXXTypeidExpr(E->getType(), E->getBeginLoc(),
+ TInfo, E->getEndLoc());
}
// We don't know whether the subexpression is potentially evaluated until
@@ -10030,10 +10114,8 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
SubExpr.get() == E->getExprOperand())
return E;
- return getDerived().RebuildCXXTypeidExpr(E->getType(),
- E->getLocStart(),
- SubExpr.get(),
- E->getLocEnd());
+ return getDerived().RebuildCXXTypeidExpr(E->getType(), E->getBeginLoc(),
+ SubExpr.get(), E->getEndLoc());
}
template<typename Derived>
@@ -10049,10 +10131,8 @@ TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
TInfo == E->getTypeOperandSourceInfo())
return E;
- return getDerived().RebuildCXXUuidofExpr(E->getType(),
- E->getLocStart(),
- TInfo,
- E->getLocEnd());
+ return getDerived().RebuildCXXUuidofExpr(E->getType(), E->getBeginLoc(),
+ TInfo, E->getEndLoc());
}
EnterExpressionEvaluationContext Unevaluated(
@@ -10066,10 +10146,8 @@ TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
SubExpr.get() == E->getExprOperand())
return E;
- return getDerived().RebuildCXXUuidofExpr(E->getType(),
- E->getLocStart(),
- SubExpr.get(),
- E->getLocEnd());
+ return getDerived().RebuildCXXUuidofExpr(E->getType(), E->getBeginLoc(),
+ SubExpr.get(), E->getEndLoc());
}
template<typename Derived>
@@ -10092,11 +10170,11 @@ TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
if (!getDerived().AlwaysRebuild() && T == E->getType()) {
// Make sure that we capture 'this'.
- getSema().CheckCXXThisCapture(E->getLocStart());
+ getSema().CheckCXXThisCapture(E->getBeginLoc());
return E;
}
- return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
+ return getDerived().RebuildCXXThisExpr(E->getBeginLoc(), T, E->isImplicit());
}
template<typename Derived>
@@ -10117,9 +10195,8 @@ TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
- ParmVarDecl *Param
- = cast_or_null<ParmVarDecl>(getDerived().TransformDecl(E->getLocStart(),
- E->getParam()));
+ ParmVarDecl *Param = cast_or_null<ParmVarDecl>(
+ getDerived().TransformDecl(E->getBeginLoc(), E->getParam()));
if (!Param)
return ExprError();
@@ -10133,9 +10210,8 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
- FieldDecl *Field
- = cast_or_null<FieldDecl>(getDerived().TransformDecl(E->getLocStart(),
- E->getField()));
+ FieldDecl *Field = cast_or_null<FieldDecl>(
+ getDerived().TransformDecl(E->getBeginLoc(), E->getField()));
if (!Field)
return ExprError();
@@ -10196,8 +10272,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
FunctionDecl *OperatorNew = nullptr;
if (E->getOperatorNew()) {
OperatorNew = cast_or_null<FunctionDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getOperatorNew()));
+ getDerived().TransformDecl(E->getBeginLoc(), E->getOperatorNew()));
if (!OperatorNew)
return ExprError();
}
@@ -10205,8 +10280,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
FunctionDecl *OperatorDelete = nullptr;
if (E->getOperatorDelete()) {
OperatorDelete = cast_or_null<FunctionDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getOperatorDelete()));
+ getDerived().TransformDecl(E->getBeginLoc(), E->getOperatorDelete()));
if (!OperatorDelete)
return ExprError();
}
@@ -10221,9 +10295,9 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Mark any declarations we need as referenced.
// FIXME: instantiation-specific.
if (OperatorNew)
- SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), OperatorNew);
if (OperatorDelete)
- SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), OperatorDelete);
if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
QualType ElementType
@@ -10231,7 +10305,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
- SemaRef.MarkFunctionReferenced(E->getLocStart(), Destructor);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Destructor);
}
}
}
@@ -10253,7 +10327,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
= dyn_cast<ConstantArrayType>(ArrayT)) {
ArraySize = IntegerLiteral::Create(SemaRef.Context, ConsArrayT->getSize(),
SemaRef.Context.getSizeType(),
- /*FIXME:*/ E->getLocStart());
+ /*FIXME:*/ E->getBeginLoc());
AllocType = ConsArrayT->getElementType();
} else if (const DependentSizedArrayType *DepArrayT
= dyn_cast<DependentSizedArrayType>(ArrayT)) {
@@ -10264,17 +10338,11 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
}
}
- return getDerived().RebuildCXXNewExpr(E->getLocStart(),
- E->isGlobalNew(),
- /*FIXME:*/E->getLocStart(),
- PlacementArgs,
- /*FIXME:*/E->getLocStart(),
- E->getTypeIdParens(),
- AllocType,
- AllocTypeInfo,
- ArraySize.get(),
- E->getDirectInitRange(),
- NewInit.get());
+ return getDerived().RebuildCXXNewExpr(
+ E->getBeginLoc(), E->isGlobalNew(),
+ /*FIXME:*/ E->getBeginLoc(), PlacementArgs,
+ /*FIXME:*/ E->getBeginLoc(), E->getTypeIdParens(), AllocType,
+ AllocTypeInfo, ArraySize.get(), E->getDirectInitRange(), NewInit.get());
}
template<typename Derived>
@@ -10288,8 +10356,7 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
FunctionDecl *OperatorDelete = nullptr;
if (E->getOperatorDelete()) {
OperatorDelete = cast_or_null<FunctionDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getOperatorDelete()));
+ getDerived().TransformDecl(E->getBeginLoc(), E->getOperatorDelete()));
if (!OperatorDelete)
return ExprError();
}
@@ -10300,14 +10367,14 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
// Mark any declarations we need as referenced.
// FIXME: instantiation-specific.
if (OperatorDelete)
- SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), OperatorDelete);
if (!E->getArgument()->isTypeDependent()) {
QualType Destroyed = SemaRef.Context.getBaseElementType(
E->getDestroyedType());
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- SemaRef.MarkFunctionReferenced(E->getLocStart(),
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(),
SemaRef.LookupDestructor(Record));
}
}
@@ -10315,10 +10382,8 @@ TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
return E;
}
- return getDerived().RebuildCXXDeleteExpr(E->getLocStart(),
- E->isGlobalDelete(),
- E->isArrayForm(),
- Operand.get());
+ return getDerived().RebuildCXXDeleteExpr(
+ E->getBeginLoc(), E->isGlobalDelete(), E->isArrayForm(), Operand.get());
}
template<typename Derived>
@@ -10651,10 +10716,8 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
if (!getDerived().AlwaysRebuild() && !ArgChanged)
return E;
- return getDerived().RebuildTypeTrait(E->getTrait(),
- E->getLocStart(),
- Args,
- E->getLocEnd());
+ return getDerived().RebuildTypeTrait(E->getTrait(), E->getBeginLoc(), Args,
+ E->getEndLoc());
}
template<typename Derived>
@@ -10680,11 +10743,8 @@ TreeTransform<Derived>::TransformArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
return E;
}
- return getDerived().RebuildArrayTypeTrait(E->getTrait(),
- E->getLocStart(),
- T,
- SubExpr.get(),
- E->getLocEnd());
+ return getDerived().RebuildArrayTypeTrait(E->getTrait(), E->getBeginLoc(), T,
+ SubExpr.get(), E->getEndLoc());
}
template<typename Derived>
@@ -10702,8 +10762,8 @@ TreeTransform<Derived>::TransformExpressionTraitExpr(ExpressionTraitExpr *E) {
return E;
}
- return getDerived().RebuildExpressionTrait(
- E->getTrait(), E->getLocStart(), SubExpr.get(), E->getLocEnd());
+ return getDerived().RebuildExpressionTrait(E->getTrait(), E->getBeginLoc(),
+ SubExpr.get(), E->getEndLoc());
}
template <typename Derived>
@@ -10789,24 +10849,27 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
!E->isListInitialization())
return getDerived().TransformExpr(E->getArg(0));
- TemporaryBase Rebase(*this, /*FIXME*/E->getLocStart(), DeclarationName());
+ TemporaryBase Rebase(*this, /*FIXME*/ E->getBeginLoc(), DeclarationName());
QualType T = getDerived().TransformType(E->getType());
if (T.isNull())
return ExprError();
- CXXConstructorDecl *Constructor
- = cast_or_null<CXXConstructorDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getConstructor()));
+ CXXConstructorDecl *Constructor = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getBeginLoc(), E->getConstructor()));
if (!Constructor)
return ExprError();
bool ArgumentChanged = false;
SmallVector<Expr*, 8> Args;
- if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
- &ArgumentChanged))
- return ExprError();
+ {
+ EnterExpressionEvaluationContext Context(
+ getSema(), EnterExpressionEvaluationContext::InitList,
+ E->isListInitialization());
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+ }
if (!getDerived().AlwaysRebuild() &&
T == E->getType() &&
@@ -10814,19 +10877,15 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
!ArgumentChanged) {
// Mark the constructor as referenced.
// FIXME: Instantiation-specific
- SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Constructor);
return E;
}
- return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(),
- Constructor,
- E->isElidable(), Args,
- E->hadMultipleCandidates(),
- E->isListInitialization(),
- E->isStdInitListInitialization(),
- E->requiresZeroInitialization(),
- E->getConstructionKind(),
- E->getParenOrBraceRange());
+ return getDerived().RebuildCXXConstructExpr(
+ T, /*FIXME:*/ E->getBeginLoc(), Constructor, E->isElidable(), Args,
+ E->hadMultipleCandidates(), E->isListInitialization(),
+ E->isStdInitListInitialization(), E->requiresZeroInitialization(),
+ E->getConstructionKind(), E->getParenOrBraceRange());
}
template<typename Derived>
@@ -10837,7 +10896,7 @@ ExprResult TreeTransform<Derived>::TransformCXXInheritedCtorInitExpr(
return ExprError();
CXXConstructorDecl *Constructor = cast_or_null<CXXConstructorDecl>(
- getDerived().TransformDecl(E->getLocStart(), E->getConstructor()));
+ getDerived().TransformDecl(E->getBeginLoc(), E->getConstructor()));
if (!Constructor)
return ExprError();
@@ -10846,7 +10905,7 @@ ExprResult TreeTransform<Derived>::TransformCXXInheritedCtorInitExpr(
Constructor == E->getConstructor()) {
// Mark the constructor as referenced.
// FIXME: Instantiation-specific
- SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Constructor);
return E;
}
@@ -10885,26 +10944,29 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
if (!T)
return ExprError();
- CXXConstructorDecl *Constructor
- = cast_or_null<CXXConstructorDecl>(
- getDerived().TransformDecl(E->getLocStart(),
- E->getConstructor()));
+ CXXConstructorDecl *Constructor = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getBeginLoc(), E->getConstructor()));
if (!Constructor)
return ExprError();
bool ArgumentChanged = false;
SmallVector<Expr*, 8> Args;
Args.reserve(E->getNumArgs());
- if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
- &ArgumentChanged))
- return ExprError();
+ {
+ EnterExpressionEvaluationContext Context(
+ getSema(), EnterExpressionEvaluationContext::InitList,
+ E->isListInitialization());
+ if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+ }
if (!getDerived().AlwaysRebuild() &&
T == E->getTypeSourceInfo() &&
Constructor == E->getConstructor() &&
!ArgumentChanged) {
// FIXME: Instantiation-specific
- SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(), Constructor);
return SemaRef.MaybeBindToTemporary(E);
}
@@ -10912,7 +10974,7 @@ TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
// prepared to handle list-initialization without a child InitListExpr.
SourceLocation LParenLoc = T->getTypeLoc().getEndLoc();
return getDerived().RebuildCXXTemporaryObjectExpr(
- T, LParenLoc, Args, E->getLocEnd(),
+ T, LParenLoc, Args, E->getEndLoc(),
/*ListInitialization=*/LParenLoc.isInvalid());
}
@@ -10970,7 +11032,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
SmallVector<QualType, 4> ExceptionStorage;
TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
QualType NewCallOpType = TransformFunctionProtoType(
- NewCallOpTLBuilder, OldCallOpFPTL, nullptr, 0,
+ NewCallOpTLBuilder, OldCallOpFPTL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
ExceptionStorage, Changed);
@@ -10996,7 +11058,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Build the call operator.
CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
Class, E->getIntroducerRange(), NewCallOpTSI,
- E->getCallOperator()->getLocEnd(),
+ E->getCallOperator()->getEndLoc(),
NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
E->getCallOperator()->isConstexpr());
@@ -11160,7 +11222,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
if (Body.isInvalid()) {
SavedContext.pop();
- getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/nullptr,
+ getSema().ActOnLambdaError(E->getBeginLoc(), /*CurScope=*/nullptr,
/*IsInstantiation=*/true);
return ExprError();
}
@@ -11173,7 +11235,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
/*IsInstantiation*/ true);
SavedContext.pop();
- return getSema().BuildLambdaExpr(E->getLocStart(), Body.get()->getLocEnd(),
+ return getSema().BuildLambdaExpr(E->getBeginLoc(), Body.get()->getEndLoc(),
&LSICopy);
}
@@ -11189,9 +11251,14 @@ TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
bool ArgumentChanged = false;
SmallVector<Expr*, 8> Args;
Args.reserve(E->arg_size());
- if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
- &ArgumentChanged))
- return ExprError();
+ {
+ EnterExpressionEvaluationContext Context(
+ getSema(), EnterExpressionEvaluationContext::InitList,
+ E->isListInitialization());
+ if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+ }
if (!getDerived().AlwaysRebuild() &&
T == E->getTypeSourceInfo() &&
@@ -11620,8 +11687,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return E;
return getDerived().RebuildCXXFoldExpr(
- E->getLocStart(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
- RHS.get(), E->getLocEnd());
+ E->getBeginLoc(), LHS.get(), E->getOperator(), E->getEllipsisLoc(),
+ RHS.get(), E->getEndLoc());
}
// The transform has determined that we should perform an elementwise
@@ -11641,8 +11708,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return true;
Result = getDerived().RebuildCXXFoldExpr(
- E->getLocStart(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
- Result.get(), E->getLocEnd());
+ E->getBeginLoc(), Out.get(), E->getOperator(), E->getEllipsisLoc(),
+ Result.get(), E->getEndLoc());
if (Result.isInvalid())
return true;
}
@@ -11657,11 +11724,9 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
if (Out.get()->containsUnexpandedParameterPack()) {
// We still have a pack; retain a pack expansion for this slice.
Result = getDerived().RebuildCXXFoldExpr(
- E->getLocStart(),
- LeftFold ? Result.get() : Out.get(),
+ E->getBeginLoc(), LeftFold ? Result.get() : Out.get(),
E->getOperator(), E->getEllipsisLoc(),
- LeftFold ? Out.get() : Result.get(),
- E->getLocEnd());
+ LeftFold ? Out.get() : Result.get(), E->getEndLoc());
} else if (Result.isUsable()) {
// We've got down to a single element; build a binary operator.
Result = getDerived().RebuildBinaryOperator(
@@ -11685,9 +11750,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return true;
Result = getDerived().RebuildCXXFoldExpr(
- E->getLocStart(), Result.get(),
- E->getOperator(), E->getEllipsisLoc(),
- Out.get(), E->getLocEnd());
+ E->getBeginLoc(), Result.get(), E->getOperator(), E->getEllipsisLoc(),
+ Out.get(), E->getEndLoc());
if (Result.isInvalid())
return true;
}
@@ -11775,13 +11839,11 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
bool RetainExpansion = false;
Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
Optional<unsigned> NumExpansions = OrigNumExpansions;
- SourceRange PatternRange(OrigElement.Key->getLocStart(),
- OrigElement.Value->getLocEnd());
- if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
- PatternRange,
- Unexpanded,
- Expand, RetainExpansion,
- NumExpansions))
+ SourceRange PatternRange(OrigElement.Key->getBeginLoc(),
+ OrigElement.Value->getEndLoc());
+ if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
+ PatternRange, Unexpanded, Expand,
+ RetainExpansion, NumExpansions))
return ExprError();
if (!Expand) {
@@ -12651,9 +12713,8 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
if (Op == OO_Subscript) {
if (!First->getType()->isOverloadableType() &&
!Second->getType()->isOverloadableType())
- return getSema().CreateBuiltinArraySubscriptExpr(First,
- Callee->getLocStart(),
- Second, OpLoc);
+ return getSema().CreateBuiltinArraySubscriptExpr(
+ First, Callee->getBeginLoc(), Second, OpLoc);
} else if (Op == OO_Arrow) {
// -> is never a builtin operation.
return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc);
@@ -12727,8 +12788,8 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
RBrace = SourceLocation::getFromRawEncoding(
NameLoc.CXXOperatorName.EndOpNameLoc);
} else {
- LBrace = Callee->getLocStart();
- RBrace = OpLoc;
+ LBrace = Callee->getBeginLoc();
+ RBrace = OpLoc;
}
return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
@@ -12799,7 +12860,7 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCapturedStmt(CapturedStmt *S) {
- SourceLocation Loc = S->getLocStart();
+ SourceLocation Loc = S->getBeginLoc();
CapturedDecl *CD = S->getCapturedDecl();
unsigned NumParams = CD->getNumParams();
unsigned ContextParamPos = CD->getContextParamPosition();
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index da482717f450..ca826d83d471 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -213,6 +213,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
ID = PREDEF_TYPE_SAMPLER_ID;
break;
@@ -383,6 +388,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::ClassScopeFunctionSpecialization:
case Decl::Import:
case Decl::OMPThreadPrivate:
+ case Decl::OMPRequires:
case Decl::OMPCapturedExpr:
case Decl::OMPDeclareReduction:
case Decl::BuiltinTemplate:
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 723839ff62bf..e0b2b24a0d32 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -61,7 +61,6 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/Version.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
@@ -81,6 +80,7 @@
#include "clang/Serialization/Module.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/Serialization/SerializationDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
@@ -396,8 +396,8 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts,
ExistingTargetOpts.FeaturesAsWritten.end());
SmallVector<StringRef, 4> ReadFeatures(TargetOpts.FeaturesAsWritten.begin(),
TargetOpts.FeaturesAsWritten.end());
- llvm::sort(ExistingFeatures.begin(), ExistingFeatures.end());
- llvm::sort(ReadFeatures.begin(), ReadFeatures.end());
+ llvm::sort(ExistingFeatures);
+ llvm::sort(ReadFeatures);
// We compute the set difference in both directions explicitly so that we can
// diagnose the differences differently.
@@ -908,7 +908,7 @@ static bool isInterestingIdentifier(ASTReader &Reader, IdentifierInfo &II,
(IsModule ? II.hasRevertedBuiltin() : II.getObjCOrBuiltinID()) ||
II.hasRevertedTokenIDToIdentifier() ||
(!(IsModule && Reader.getPreprocessor().getLangOpts().CPlusPlus) &&
- II.getFETokenInfo<void>());
+ II.getFETokenInfo());
}
static bool readBit(unsigned &Bits) {
@@ -3780,22 +3780,15 @@ void ASTReader::makeModuleVisible(Module *Mod,
/// visible.
void ASTReader::mergeDefinitionVisibility(NamedDecl *Def,
NamedDecl *MergedDef) {
- // FIXME: This doesn't correctly handle the case where MergedDef is visible
- // in modules other than its owning module. We should instead give the
- // ASTContext a list of merged definitions for Def.
if (Def->isHidden()) {
// If MergedDef is visible or becomes visible, make the definition visible.
if (!MergedDef->isHidden())
Def->setVisibleDespiteOwningModule();
- else if (getContext().getLangOpts().ModulesLocalVisibility) {
+ else {
getContext().mergeDefinitionIntoModule(
Def, MergedDef->getImportedOwningModule(),
/*NotifyListeners*/ false);
PendingMergedDefinitionsToDeduplicate.insert(Def);
- } else {
- auto SubmoduleID = MergedDef->getOwningModuleID();
- assert(SubmoduleID && "hidden definition in no module");
- HiddenNamesMap[getSubmodule(SubmoduleID)].push_back(Def);
}
}
}
@@ -4868,11 +4861,11 @@ bool ASTReader::readASTFileControlBlock(
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
- Idx += 5; // ImportLoc, Size, ModTime, Signature
- SkipString(Record, Idx); // Module name; FIXME: pass to listener?
+ Idx += 1+1+1+1+5; // Kind, ImportLoc, Size, ModTime, Signature
+ std::string ModuleName = ReadString(Record, Idx);
std::string Filename = ReadString(Record, Idx);
ResolveImportedPath(Filename, ModuleDir);
- Listener.visitImport(Filename);
+ Listener.visitImport(ModuleName, Filename);
}
break;
}
@@ -5395,7 +5388,6 @@ bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
PPOpts.UsePredefines = Record[Idx++];
PPOpts.DetailedRecord = Record[Idx++];
PPOpts.ImplicitPCHInclude = ReadString(Record, Idx);
- PPOpts.ImplicitPTHInclude = ReadString(Record, Idx);
PPOpts.ObjCXXARCStandardLibrary =
static_cast<ObjCXXARCStandardLibraryKind>(Record[Idx++]);
SuggestedPredefines.clear();
@@ -6058,7 +6050,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
EPI.Variadic = Record[Idx++];
EPI.HasTrailingReturn = Record[Idx++];
- EPI.TypeQuals = Record[Idx++];
+ EPI.TypeQuals = Qualifiers::fromOpaqueValue(Record[Idx++]);
EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
SmallVector<QualType, 8> ExceptionStorage;
readExceptionSpec(*Loc.F, ExceptionStorage, EPI.ExceptionSpec, Record, Idx);
@@ -6455,6 +6447,10 @@ class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
return Reader->ReadNestedNameSpecifierLoc(*F, Record, Idx);
}
+ Attr *ReadAttr() {
+ return Reader->ReadAttr(*F, Record, Idx);
+ }
+
public:
TypeLocReader(ModuleFile &F, ASTReader &Reader,
const ASTReader::RecordData &Record, unsigned &Idx)
@@ -6646,20 +6642,7 @@ void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
}
void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- TL.setAttrNameLoc(ReadSourceLocation());
- if (TL.hasAttrOperand()) {
- SourceRange range;
- range.setBegin(ReadSourceLocation());
- range.setEnd(ReadSourceLocation());
- TL.setAttrOperandParensRange(range);
- }
- if (TL.hasAttrExprOperand()) {
- if (Record[Idx++])
- TL.setAttrExprOperand(Reader->ReadExpr(*F));
- else
- TL.setAttrExprOperand(nullptr);
- } else if (TL.hasAttrEnumOperand())
- TL.setAttrEnumOperandLoc(ReadSourceLocation());
+ TL.setAttr(ReadAttr());
}
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
@@ -6979,6 +6962,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.Id##Ty; \
+ break;
+#include "clang/Basic/OpenCLExtensionTypes.def"
case PREDEF_TYPE_SAMPLER_ID:
T = Context.OCLSamplerTy;
break;
@@ -9206,8 +9194,7 @@ void ASTReader::ReadComments() {
NextCursor:
// De-serialized SourceLocations get negative FileIDs for other modules,
// potentially invalidating the original order. Sort it again.
- llvm::sort(Comments.begin(), Comments.end(),
- BeforeThanCompare<RawComment>(SourceMgr));
+ llvm::sort(Comments, BeforeThanCompare<RawComment>(SourceMgr));
Context.Comments.addDeserializedComments(Comments);
}
}
@@ -9254,7 +9241,7 @@ std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) {
}
void ASTReader::finishPendingActions() {
- while (!PendingIdentifierInfos.empty() ||
+ while (!PendingIdentifierInfos.empty() || !PendingFunctionTypes.empty() ||
!PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
!PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
!PendingUpdateRecords.empty()) {
@@ -9273,6 +9260,21 @@ void ASTReader::finishPendingActions() {
SetGloballyVisibleDecls(II, DeclIDs, &TopLevelDecls[II]);
}
+ // Load each function type that we deferred loading because it was a
+ // deduced type that might refer to a local type declared within itself.
+ for (unsigned I = 0; I != PendingFunctionTypes.size(); ++I) {
+ auto *FD = PendingFunctionTypes[I].first;
+ FD->setType(GetType(PendingFunctionTypes[I].second));
+
+ // If we gave a function a deduced return type, remember that we need to
+ // propagate that along the redeclaration chain.
+ auto *DT = FD->getReturnType()->getContainedDeducedType();
+ if (DT && DT->isDeduced())
+ PendingDeducedTypeUpdates.insert(
+ {FD->getCanonicalDecl(), FD->getReturnType()});
+ }
+ PendingFunctionTypes.clear();
+
// For each decl chain that we wanted to complete while deserializing, mark
// it as "still needs to be completed".
for (unsigned I = 0; I != PendingIncompleteDeclChains.size(); ++I) {
@@ -9282,7 +9284,8 @@ void ASTReader::finishPendingActions() {
// Load pending declaration chains.
for (unsigned I = 0; I != PendingDeclChains.size(); ++I)
- loadPendingDeclChain(PendingDeclChains[I].first, PendingDeclChains[I].second);
+ loadPendingDeclChain(PendingDeclChains[I].first,
+ PendingDeclChains[I].second);
PendingDeclChains.clear();
// Make the most recent of the top-level declarations visible.
@@ -9677,8 +9680,8 @@ void ASTReader::diagnoseOdrViolations() {
unsigned NumBases = DD->NumBases;
if (NumBases == 0) return SourceRange();
auto bases = DD->bases();
- return SourceRange(bases[0].getLocStart(),
- bases[NumBases - 1].getLocEnd());
+ return SourceRange(bases[0].getBeginLoc(),
+ bases[NumBases - 1].getEndLoc());
};
if (FirstNumBases != SecondNumBases) {
@@ -10179,10 +10182,10 @@ void ASTReader::diagnoseOdrViolations() {
unsigned FirstODRHash = ComputeODRHash(FirstExpr);
unsigned SecondODRHash = ComputeODRHash(SecondExpr);
if (FirstODRHash != SecondODRHash) {
- ODRDiagError(FirstExpr->getLocStart(), FirstExpr->getSourceRange(),
+ ODRDiagError(FirstExpr->getBeginLoc(), FirstExpr->getSourceRange(),
StaticAssertCondition);
- ODRDiagNote(SecondExpr->getLocStart(),
- SecondExpr->getSourceRange(), StaticAssertCondition);
+ ODRDiagNote(SecondExpr->getBeginLoc(), SecondExpr->getSourceRange(),
+ StaticAssertCondition);
Diagnosed = true;
break;
}
@@ -10194,17 +10197,17 @@ void ASTReader::diagnoseOdrViolations() {
SourceLocation FirstLoc, SecondLoc;
SourceRange FirstRange, SecondRange;
if (FirstStr) {
- FirstLoc = FirstStr->getLocStart();
+ FirstLoc = FirstStr->getBeginLoc();
FirstRange = FirstStr->getSourceRange();
} else {
- FirstLoc = FirstSA->getLocStart();
+ FirstLoc = FirstSA->getBeginLoc();
FirstRange = FirstSA->getSourceRange();
}
if (SecondStr) {
- SecondLoc = SecondStr->getLocStart();
+ SecondLoc = SecondStr->getBeginLoc();
SecondRange = SecondStr->getSourceRange();
} else {
- SecondLoc = SecondSA->getLocStart();
+ SecondLoc = SecondSA->getBeginLoc();
SecondRange = SecondSA->getSourceRange();
}
ODRDiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
@@ -10217,9 +10220,9 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstStr && SecondStr &&
FirstStr->getString() != SecondStr->getString()) {
- ODRDiagError(FirstStr->getLocStart(), FirstStr->getSourceRange(),
+ ODRDiagError(FirstStr->getBeginLoc(), FirstStr->getSourceRange(),
StaticAssertMessage);
- ODRDiagNote(SecondStr->getLocStart(), SecondStr->getSourceRange(),
+ ODRDiagNote(SecondStr->getBeginLoc(), SecondStr->getSourceRange(),
StaticAssertMessage);
Diagnosed = true;
break;
@@ -11531,11 +11534,16 @@ void ASTReader::FinishedDeserializing() {
--NumCurrentElementsDeserializing;
if (NumCurrentElementsDeserializing == 0) {
- // Propagate exception specification updates along redeclaration chains.
- while (!PendingExceptionSpecUpdates.empty()) {
- auto Updates = std::move(PendingExceptionSpecUpdates);
+ // Propagate exception specification and deduced type updates along
+ // redeclaration chains.
+ //
+ // We do this now rather than in finishPendingActions because we want to
+ // be able to walk the complete redeclaration chains of the updated decls.
+ while (!PendingExceptionSpecUpdates.empty() ||
+ !PendingDeducedTypeUpdates.empty()) {
+ auto ESUpdates = std::move(PendingExceptionSpecUpdates);
PendingExceptionSpecUpdates.clear();
- for (auto Update : Updates) {
+ for (auto Update : ESUpdates) {
ProcessingUpdatesRAIIObj ProcessingUpdates(*this);
auto *FPT = Update.second->getType()->castAs<FunctionProtoType>();
auto ESI = FPT->getExtProtoInfo().ExceptionSpec;
@@ -11544,6 +11552,15 @@ void ASTReader::FinishedDeserializing() {
for (auto *Redecl : Update.second->redecls())
getContext().adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
}
+
+ auto DTUpdates = std::move(PendingDeducedTypeUpdates);
+ PendingDeducedTypeUpdates.clear();
+ for (auto Update : DTUpdates) {
+ ProcessingUpdatesRAIIObj ProcessingUpdates(*this);
+ // FIXME: If the return type is already deduced, check that it matches.
+ getContext().adjustDeducedFunctionResultType(Update.first,
+ Update.second);
+ }
}
if (ReadTimer)
@@ -11637,3 +11654,910 @@ unsigned ASTRecordReader::readRecord(llvm::BitstreamCursor &Cursor,
Record.clear();
return Cursor.readRecord(AbbrevID, Record);
}
+//===----------------------------------------------------------------------===//
+// OMPClauseReader implementation
+//===----------------------------------------------------------------------===//
+
+OMPClause *OMPClauseReader::readClause() {
+ OMPClause *C;
+ switch (Record.readInt()) {
+ case OMPC_if:
+ C = new (Context) OMPIfClause();
+ break;
+ case OMPC_final:
+ C = new (Context) OMPFinalClause();
+ break;
+ case OMPC_num_threads:
+ C = new (Context) OMPNumThreadsClause();
+ break;
+ case OMPC_safelen:
+ C = new (Context) OMPSafelenClause();
+ break;
+ case OMPC_simdlen:
+ C = new (Context) OMPSimdlenClause();
+ break;
+ case OMPC_collapse:
+ C = new (Context) OMPCollapseClause();
+ break;
+ case OMPC_default:
+ C = new (Context) OMPDefaultClause();
+ break;
+ case OMPC_proc_bind:
+ C = new (Context) OMPProcBindClause();
+ break;
+ case OMPC_schedule:
+ C = new (Context) OMPScheduleClause();
+ break;
+ case OMPC_ordered:
+ C = OMPOrderedClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_nowait:
+ C = new (Context) OMPNowaitClause();
+ break;
+ case OMPC_untied:
+ C = new (Context) OMPUntiedClause();
+ break;
+ case OMPC_mergeable:
+ C = new (Context) OMPMergeableClause();
+ break;
+ case OMPC_read:
+ C = new (Context) OMPReadClause();
+ break;
+ case OMPC_write:
+ C = new (Context) OMPWriteClause();
+ break;
+ case OMPC_update:
+ C = new (Context) OMPUpdateClause();
+ break;
+ case OMPC_capture:
+ C = new (Context) OMPCaptureClause();
+ break;
+ case OMPC_seq_cst:
+ C = new (Context) OMPSeqCstClause();
+ break;
+ case OMPC_threads:
+ C = new (Context) OMPThreadsClause();
+ break;
+ case OMPC_simd:
+ C = new (Context) OMPSIMDClause();
+ break;
+ case OMPC_nogroup:
+ C = new (Context) OMPNogroupClause();
+ break;
+ case OMPC_unified_address:
+ C = new (Context) OMPUnifiedAddressClause();
+ break;
+ case OMPC_unified_shared_memory:
+ C = new (Context) OMPUnifiedSharedMemoryClause();
+ break;
+ case OMPC_reverse_offload:
+ C = new (Context) OMPReverseOffloadClause();
+ break;
+ case OMPC_dynamic_allocators:
+ C = new (Context) OMPDynamicAllocatorsClause();
+ break;
+ case OMPC_atomic_default_mem_order:
+ C = new (Context) OMPAtomicDefaultMemOrderClause();
+ break;
+ case OMPC_private:
+ C = OMPPrivateClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_firstprivate:
+ C = OMPFirstprivateClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_lastprivate:
+ C = OMPLastprivateClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_shared:
+ C = OMPSharedClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_reduction:
+ C = OMPReductionClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_task_reduction:
+ C = OMPTaskReductionClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_in_reduction:
+ C = OMPInReductionClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_linear:
+ C = OMPLinearClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_aligned:
+ C = OMPAlignedClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_copyin:
+ C = OMPCopyinClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_copyprivate:
+ C = OMPCopyprivateClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_flush:
+ C = OMPFlushClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case OMPC_depend: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumLoops = Record.readInt();
+ C = OMPDependClause::CreateEmpty(Context, NumVars, NumLoops);
+ break;
+ }
+ case OMPC_device:
+ C = new (Context) OMPDeviceClause();
+ break;
+ case OMPC_map: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumDeclarations = Record.readInt();
+ unsigned NumLists = Record.readInt();
+ unsigned NumComponents = Record.readInt();
+ C = OMPMapClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
+ NumComponents);
+ break;
+ }
+ case OMPC_num_teams:
+ C = new (Context) OMPNumTeamsClause();
+ break;
+ case OMPC_thread_limit:
+ C = new (Context) OMPThreadLimitClause();
+ break;
+ case OMPC_priority:
+ C = new (Context) OMPPriorityClause();
+ break;
+ case OMPC_grainsize:
+ C = new (Context) OMPGrainsizeClause();
+ break;
+ case OMPC_num_tasks:
+ C = new (Context) OMPNumTasksClause();
+ break;
+ case OMPC_hint:
+ C = new (Context) OMPHintClause();
+ break;
+ case OMPC_dist_schedule:
+ C = new (Context) OMPDistScheduleClause();
+ break;
+ case OMPC_defaultmap:
+ C = new (Context) OMPDefaultmapClause();
+ break;
+ case OMPC_to: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumDeclarations = Record.readInt();
+ unsigned NumLists = Record.readInt();
+ unsigned NumComponents = Record.readInt();
+ C = OMPToClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
+ NumComponents);
+ break;
+ }
+ case OMPC_from: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumDeclarations = Record.readInt();
+ unsigned NumLists = Record.readInt();
+ unsigned NumComponents = Record.readInt();
+ C = OMPFromClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
+ NumComponents);
+ break;
+ }
+ case OMPC_use_device_ptr: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumDeclarations = Record.readInt();
+ unsigned NumLists = Record.readInt();
+ unsigned NumComponents = Record.readInt();
+ C = OMPUseDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
+ NumLists, NumComponents);
+ break;
+ }
+ case OMPC_is_device_ptr: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumDeclarations = Record.readInt();
+ unsigned NumLists = Record.readInt();
+ unsigned NumComponents = Record.readInt();
+ C = OMPIsDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
+ NumLists, NumComponents);
+ break;
+ }
+ }
+ Visit(C);
+ C->setLocStart(Record.readSourceLocation());
+ C->setLocEnd(Record.readSourceLocation());
+
+ return C;
+}
+
+void OMPClauseReader::VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C) {
+ C->setPreInitStmt(Record.readSubStmt(),
+ static_cast<OpenMPDirectiveKind>(Record.readInt()));
+}
+
+void OMPClauseReader::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setPostUpdateExpr(Record.readSubExpr());
+}
+
+void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setNameModifier(static_cast<OpenMPDirectiveKind>(Record.readInt()));
+ C->setNameModifierLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ C->setCondition(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPFinalClause(OMPFinalClause *C) {
+ C->setCondition(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setNumThreads(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
+ C->setSafelen(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ C->setSimdlen(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
+ C->setNumForLoops(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
+ C->setDefaultKind(
+ static_cast<OpenMPDefaultClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDefaultKindKwLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
+ C->setProcBindKind(
+ static_cast<OpenMPProcBindClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setProcBindKindKwLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setScheduleKind(
+ static_cast<OpenMPScheduleClauseKind>(Record.readInt()));
+ C->setFirstScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record.readInt()));
+ C->setSecondScheduleModifier(
+ static_cast<OpenMPScheduleClauseModifier>(Record.readInt()));
+ C->setChunkSize(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setFirstScheduleModifierLoc(Record.readSourceLocation());
+ C->setSecondScheduleModifierLoc(Record.readSourceLocation());
+ C->setScheduleKindLoc(Record.readSourceLocation());
+ C->setCommaLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ C->setNumForLoops(Record.readSubExpr());
+ for (unsigned I = 0, E = C->NumberOfLoops; I < E; ++I)
+ C->setLoopNumIterations(I, Record.readSubExpr());
+ for (unsigned I = 0, E = C->NumberOfLoops; I < E; ++I)
+ C->setLoopCounter(I, Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
+
+void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
+
+void OMPClauseReader::VisitOMPMergeableClause(OMPMergeableClause *) {}
+
+void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
+void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
+void OMPClauseReader::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
+
+void OMPClauseReader::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {}
+
+void OMPClauseReader::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {}
+
+void
+OMPClauseReader::VisitOMPDynamicAllocatorsClause(OMPDynamicAllocatorsClause *) {
+}
+
+void OMPClauseReader::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ C->setAtomicDefaultMemOrderKind(
+ static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setAtomicDefaultMemOrderKindKwLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivateCopies(Vars);
+}
+
+void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInits(Vars);
+}
+
+void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
+ VisitOMPClauseWithPostUpdate(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setSourceExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setDestinationExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setAssignmentOps(Vars);
+}
+
+void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
+ VisitOMPClauseWithPostUpdate(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ NestedNameSpecifierLoc NNSL = Record.readNestedNameSpecifierLoc();
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setQualifierLoc(NNSL);
+ C->setNameInfo(DNI);
+
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setLHSExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setRHSExprs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setReductionOps(Vars);
+}
+
+void OMPClauseReader::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
+ VisitOMPClauseWithPostUpdate(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ NestedNameSpecifierLoc NNSL = Record.readNestedNameSpecifierLoc();
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setQualifierLoc(NNSL);
+ C->setNameInfo(DNI);
+
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setLHSExprs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setRHSExprs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setReductionOps(Vars);
+}
+
+void OMPClauseReader::VisitOMPInReductionClause(OMPInReductionClause *C) {
+ VisitOMPClauseWithPostUpdate(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ NestedNameSpecifierLoc NNSL = Record.readNestedNameSpecifierLoc();
+ DeclarationNameInfo DNI;
+ Record.readDeclarationNameInfo(DNI);
+ C->setQualifierLoc(NNSL);
+ C->setNameInfo(DNI);
+
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setLHSExprs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setRHSExprs(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setReductionOps(Vars);
+ Vars.clear();
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setTaskgroupDescriptors(Vars);
+}
+
+void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
+ VisitOMPClauseWithPostUpdate(C);
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ C->setModifier(static_cast<OpenMPLinearClauseKind>(Record.readInt()));
+ C->setModifierLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInits(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setUpdates(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setFinals(Vars);
+ C->setStep(Record.readSubExpr());
+ C->setCalcStep(Record.readSubExpr());
+}
+
+void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ C->setAlignment(Record.readSubExpr());
+}
+
+void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Exprs;
+ Exprs.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setVarRefs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setSourceExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setDestinationExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setAssignmentOps(Exprs);
+}
+
+void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Exprs;
+ Exprs.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setVarRefs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setSourceExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setDestinationExprs(Exprs);
+ Exprs.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Exprs.push_back(Record.readSubExpr());
+ C->setAssignmentOps(Exprs);
+}
+
+void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+}
+
+void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDependencyKind(
+ static_cast<OpenMPDependClauseKind>(Record.readInt()));
+ C->setDependencyLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ C->setLoopData(I, Record.readSubExpr());
+}
+
+void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setDevice(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ C->setMapTypeModifier(
+ I, static_cast<OpenMPMapModifierKind>(Record.readInt()));
+ C->setMapTypeModifierLoc(I, Record.readSourceLocation());
+ }
+ C->setMapType(
+ static_cast<OpenMPMapClauseKind>(Record.readInt()));
+ C->setMapLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
+void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setNumTeams(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setThreadLimit(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ C->setPriority(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ C->setGrainsize(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ C->setNumTasks(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPHintClause(OMPHintClause *C) {
+ C->setHint(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPDistScheduleClause(OMPDistScheduleClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setDistScheduleKind(
+ static_cast<OpenMPDistScheduleClauseKind>(Record.readInt()));
+ C->setChunkSize(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDistScheduleKindLoc(Record.readSourceLocation());
+ C->setCommaLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
+ C->setDefaultmapKind(
+ static_cast<OpenMPDefaultmapClauseKind>(Record.readInt()));
+ C->setDefaultmapModifier(
+ static_cast<OpenMPDefaultmapClauseModifier>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDefaultmapModifierLoc(Record.readSourceLocation());
+ C->setDefaultmapKindLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
+void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
+void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setInits(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
+
+void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
+}
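
A minimal sketch (not Clang code) of the record layout these map-like clause visitors consume; the names ToyRecord, readExpr, readDecl and readMappableClauseBody are hypothetical stand-ins for the ASTRecordReader API, shown only to make the four-level layout (variables, unique declarations, lists per declaration, list sizes, components) easier to follow.

    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct ToyExpr {};
    struct ToyDecl {};

    struct ToyRecord {
      std::vector<uint64_t> Data;   // serialized integers
      std::vector<ToyExpr *> Exprs; // pre-read expression operands
      std::vector<ToyDecl *> Decls; // pre-read declarations
      std::size_t IntIdx = 0, ExprIdx = 0, DeclIdx = 0;

      uint64_t readInt() { return Data[IntIdx++]; }
      ToyExpr *readExpr() { return Exprs[ExprIdx++]; }
      ToyDecl *readDecl() { return Decls[DeclIdx++]; }
    };

    // Reads one map-like clause body: the flat variable list, the unique
    // declarations, how many component lists each declaration owns, the size
    // of every list, and finally the (expression, declaration) components.
    void readMappableClauseBody(ToyRecord &R, unsigned NumVars,
                                unsigned UniqueDecls, unsigned TotalLists,
                                unsigned TotalComponents) {
      std::vector<ToyExpr *> Vars;
      for (unsigned I = 0; I != NumVars; ++I)
        Vars.push_back(R.readExpr());

      std::vector<ToyDecl *> Decls;
      for (unsigned I = 0; I != UniqueDecls; ++I)
        Decls.push_back(R.readDecl());

      std::vector<unsigned> ListsPerDecl(UniqueDecls), ListSizes(TotalLists);
      for (unsigned &N : ListsPerDecl)
        N = static_cast<unsigned>(R.readInt());
      for (unsigned &N : ListSizes)
        N = static_cast<unsigned>(R.readInt());

      std::vector<std::pair<ToyExpr *, ToyDecl *>> Components;
      for (unsigned I = 0; I != TotalComponents; ++I) {
        ToyExpr *E = R.readExpr();
        ToyDecl *D = R.readDecl();
        Components.emplace_back(E, D);
      }
      // A real reader would now hand Vars, Decls, ListsPerDecl, ListSizes and
      // Components to the clause's setters, as the visitors above do.
    }
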
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index ad37a2978df1..763ab527570d 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -30,6 +30,7 @@
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
@@ -47,7 +48,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Sema/IdentifierResolver.h"
-#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ContinuousRangeMap.h"
@@ -274,7 +274,7 @@ namespace clang {
if (auto &Old = LazySpecializations) {
IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]);
- llvm::sort(IDs.begin(), IDs.end());
+ llvm::sort(IDs);
IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
}
@@ -446,6 +446,7 @@ namespace clang {
void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
+ void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
};
@@ -506,8 +507,8 @@ void ASTDeclReader::ReadFunctionDefinition(FunctionDecl *FD) {
if (Record.readInt())
Reader.DefinitionSource[FD] = Loc.F->Kind == ModuleKind::MK_MainFile;
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- CD->NumCtorInitializers = Record.readInt();
- if (CD->NumCtorInitializers)
+ CD->setNumCtorInitializers(Record.readInt());
+ if (CD->getNumCtorInitializers())
CD->CtorInitializers = ReadGlobalOffset();
}
// Store the offset of the body so we can lazily load it later.
@@ -541,9 +542,6 @@ void ASTDeclReader::Visit(Decl *D) {
// if we have a fully initialized TypeDecl, we can safely read its type now.
ID->TypeForDecl = Reader.GetType(DeferredTypeID).getTypePtrOrNull();
} else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (DeferredTypeID)
- FD->setType(Reader.GetType(DeferredTypeID));
-
// FunctionDecl's body was written last after all other Stmts/Exprs.
// We only read it if FD doesn't already have a body (e.g., from another
// module).
@@ -742,16 +740,16 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
ED->setPromotionType(Record.readType());
ED->setNumPositiveBits(Record.readInt());
ED->setNumNegativeBits(Record.readInt());
- ED->IsScoped = Record.readInt();
- ED->IsScopedUsingClassTag = Record.readInt();
- ED->IsFixed = Record.readInt();
+ ED->setScoped(Record.readInt());
+ ED->setScopedUsingClassTag(Record.readInt());
+ ED->setFixed(Record.readInt());
- ED->HasODRHash = true;
+ ED->setHasODRHash(true);
ED->ODRHash = Record.readInt();
// If this is a definition subject to the ODR, and we already have a
// definition, merge this one into it.
- if (ED->IsCompleteDefinition &&
+ if (ED->isCompleteDefinition() &&
Reader.getContext().getLangOpts().Modules &&
Reader.getContext().getLangOpts().CPlusPlus) {
EnumDecl *&OldDef = Reader.EnumDefinitions[ED->getCanonicalDecl()];
@@ -767,7 +765,7 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
}
if (OldDef) {
Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
- ED->IsCompleteDefinition = false;
+ ED->setCompleteDefinition(false);
Reader.mergeDefinitionVisibility(OldDef, ED);
if (OldDef->getODRHash() != ED->getODRHash())
Reader.PendingEnumOdrMergeFailures[OldDef].push_back(ED);
@@ -844,10 +842,11 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// We'll set up the real type in Visit, once we've finished loading the
// function.
FD->setType(FD->getTypeSourceInfo()->getType());
+ Reader.PendingFunctionTypes.push_back({FD, DeferredTypeID});
} else {
FD->setType(Reader.GetType(DeferredTypeID));
- DeferredTypeID = 0;
}
+ DeferredTypeID = 0;
ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName());
FD->IdentifierNamespace = Record.readInt();
@@ -855,30 +854,31 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
// FunctionDecl's body is handled last at ASTDeclReader::Visit,
// after everything else is read.
- FD->SClass = (StorageClass)Record.readInt();
- FD->IsInline = Record.readInt();
- FD->IsInlineSpecified = Record.readInt();
- FD->IsExplicitSpecified = Record.readInt();
- FD->IsVirtualAsWritten = Record.readInt();
- FD->IsPure = Record.readInt();
- FD->HasInheritedPrototype = Record.readInt();
- FD->HasWrittenPrototype = Record.readInt();
- FD->IsDeleted = Record.readInt();
- FD->IsTrivial = Record.readInt();
- FD->IsTrivialForCall = Record.readInt();
- FD->IsDefaulted = Record.readInt();
- FD->IsExplicitlyDefaulted = Record.readInt();
- FD->HasImplicitReturnZero = Record.readInt();
- FD->IsConstexpr = Record.readInt();
- FD->UsesSEHTry = Record.readInt();
- FD->HasSkippedBody = Record.readInt();
- FD->IsMultiVersion = Record.readInt();
- FD->IsLateTemplateParsed = Record.readInt();
- FD->setCachedLinkage(Linkage(Record.readInt()));
+ FD->setStorageClass(static_cast<StorageClass>(Record.readInt()));
+ FD->setInlineSpecified(Record.readInt());
+ FD->setImplicitlyInline(Record.readInt());
+ FD->setExplicitSpecified(Record.readInt());
+ FD->setVirtualAsWritten(Record.readInt());
+ FD->setPure(Record.readInt());
+ FD->setHasInheritedPrototype(Record.readInt());
+ FD->setHasWrittenPrototype(Record.readInt());
+ FD->setDeletedAsWritten(Record.readInt());
+ FD->setTrivial(Record.readInt());
+ FD->setTrivialForCall(Record.readInt());
+ FD->setDefaulted(Record.readInt());
+ FD->setExplicitlyDefaulted(Record.readInt());
+ FD->setHasImplicitReturnZero(Record.readInt());
+ FD->setConstexpr(Record.readInt());
+ FD->setUsesSEHTry(Record.readInt());
+ FD->setHasSkippedBody(Record.readInt());
+ FD->setIsMultiVersion(Record.readInt());
+ FD->setLateTemplateParsed(Record.readInt());
+
+ FD->setCachedLinkage(static_cast<Linkage>(Record.readInt()));
FD->EndRangeLoc = ReadSourceLocation();
FD->ODRHash = Record.readInt();
- FD->HasODRHash = true;
+ FD->setHasODRHash(true);
switch ((FunctionDecl::TemplatedKind)Record.readInt()) {
case FunctionDecl::TK_NonTemplate:
@@ -1006,18 +1006,18 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
MD->setVariadic(Record.readInt());
MD->setPropertyAccessor(Record.readInt());
MD->setDefined(Record.readInt());
- MD->IsOverriding = Record.readInt();
- MD->HasSkippedBody = Record.readInt();
+ MD->setOverriding(Record.readInt());
+ MD->setHasSkippedBody(Record.readInt());
- MD->IsRedeclaration = Record.readInt();
- MD->HasRedeclaration = Record.readInt();
- if (MD->HasRedeclaration)
+ MD->setIsRedeclaration(Record.readInt());
+ MD->setHasRedeclaration(Record.readInt());
+ if (MD->hasRedeclaration())
Reader.getContext().setObjCMethodRedeclaration(MD,
ReadDeclAs<ObjCMethodDecl>());
MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record.readInt());
MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record.readInt());
- MD->SetRelatedResultType(Record.readInt());
+ MD->setRelatedResultType(Record.readInt());
MD->setReturnType(Record.readType());
MD->setReturnTypeSourceInfo(GetTypeSourceInfo());
MD->DeclEndLoc = ReadSourceLocation();
@@ -1027,7 +1027,7 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
for (unsigned I = 0; I != NumParams; ++I)
Params.push_back(ReadDeclAs<ParmVarDecl>());
- MD->SelLocsKind = Record.readInt();
+ MD->setSelLocsKind((SelectorLocationsKind)Record.readInt());
unsigned NumStoredSelLocs = Record.readInt();
SmallVector<SourceLocation, 16> SelLocs;
SelLocs.reserve(NumStoredSelLocs);
@@ -1350,6 +1350,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->VarDeclBits.SClass = (StorageClass)Record.readInt();
VD->VarDeclBits.TSCSpec = Record.readInt();
VD->VarDeclBits.InitStyle = Record.readInt();
+ VD->VarDeclBits.ARCPseudoStrong = Record.readInt();
if (!isa<ParmVarDecl>(VD)) {
VD->NonParmVarDeclBits.IsThisDeclarationADemotedDefinition =
Record.readInt();
@@ -1357,13 +1358,13 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->NonParmVarDeclBits.NRVOVariable = Record.readInt();
VD->NonParmVarDeclBits.CXXForRangeDecl = Record.readInt();
VD->NonParmVarDeclBits.ObjCForDecl = Record.readInt();
- VD->NonParmVarDeclBits.ARCPseudoStrong = Record.readInt();
VD->NonParmVarDeclBits.IsInline = Record.readInt();
VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
VD->NonParmVarDeclBits.IsConstexpr = Record.readInt();
VD->NonParmVarDeclBits.IsInitCapture = Record.readInt();
VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
+ VD->NonParmVarDeclBits.EscapingByref = Record.readInt();
}
auto VarLinkage = Linkage(Record.readInt());
VD->setCachedLinkage(VarLinkage);
@@ -1382,6 +1383,12 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
}
}
+ if (VD->hasAttr<BlocksAttr>() && VD->getType()->getAsCXXRecordDecl()) {
+ Expr *CopyExpr = Record.readExpr();
+ if (CopyExpr)
+ Reader.getContext().setBlockVarCopyInit(VD, CopyExpr, Record.readInt());
+ }
+
if (VD->getStorageDuration() == SD_Static && Record.readInt())
Reader.DefinitionSource[VD] = Loc.F->Kind == ModuleKind::MK_MainFile;
@@ -1471,6 +1478,7 @@ void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
BD->setIsVariadic(Record.readInt());
BD->setBlockMissingReturnType(Record.readInt());
BD->setIsConversionFromLambda(Record.readInt());
+ BD->setDoesNotEscape(Record.readInt());
bool capturesCXXThis = Record.readInt();
unsigned numCaptures = Record.readInt();
@@ -1744,7 +1752,7 @@ void ASTDeclReader::MergeDefinitionData(
Reader.MergedDeclContexts.insert(std::make_pair(MergeDD.Definition,
DD.Definition));
Reader.PendingDefinitions.erase(MergeDD.Definition);
- MergeDD.Definition->IsCompleteDefinition = false;
+ MergeDD.Definition->setCompleteDefinition(false);
Reader.mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
"already loaded pending lookups for merged definition");
@@ -1884,7 +1892,7 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
}
// Mark this declaration as being a definition.
- D->IsCompleteDefinition = true;
+ D->setCompleteDefinition(true);
// If this is not the first declaration or is an update record, we can have
// other redeclarations already. Make a note that we need to propagate the
@@ -1946,7 +1954,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
// compute it.
if (WasDefinition) {
DeclID KeyFn = ReadDeclID();
- if (KeyFn && D->IsCompleteDefinition)
+ if (KeyFn && D->isCompleteDefinition())
// FIXME: This is wrong for the ARM ABI, where some other module may have
// made this function no longer be a key function. We need an update
// record or similar for that case.
@@ -1958,7 +1966,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
void ASTDeclReader::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
VisitFunctionDecl(D);
- D->IsCopyDeductionCandidate = Record.readInt();
+ D->setIsCopyDeductionCandidate(Record.readInt());
}
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
@@ -2624,13 +2632,31 @@ void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
D->setVars(Vars);
}
+void ASTDeclReader::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
+ VisitDecl(D);
+ unsigned NumClauses = D->clauselist_size();
+ SmallVector<OMPClause *, 8> Clauses;
+ Clauses.reserve(NumClauses);
+ OMPClauseReader ClauseReader(Record);
+ for (unsigned I = 0; I != NumClauses; ++I)
+ Clauses.push_back(ClauseReader.readClause());
+ D->setClauses(Clauses);
+}
+
void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
VisitValueDecl(D);
D->setLocation(ReadSourceLocation());
- D->setCombiner(Record.readExpr());
- D->setInitializer(
- Record.readExpr(),
- static_cast<OMPDeclareReductionDecl::InitKind>(Record.readInt()));
+ Expr *In = Record.readExpr();
+ Expr *Out = Record.readExpr();
+ D->setCombinerData(In, Out);
+ Expr *Combiner = Record.readExpr();
+ D->setCombiner(Combiner);
+ Expr *Orig = Record.readExpr();
+ Expr *Priv = Record.readExpr();
+ D->setInitializerData(Orig, Priv);
+ Expr *Init = Record.readExpr();
+ auto IK = static_cast<OMPDeclareReductionDecl::InitKind>(Record.readInt());
+ D->setInitializer(Init, IK);
D->PrevDeclInScope = ReadDeclID();
}
@@ -2642,19 +2668,72 @@ void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
// Attribute Reading
//===----------------------------------------------------------------------===//
-/// Reads attributes from the current stream position.
-void ASTReader::ReadAttributes(ASTRecordReader &Record, AttrVec &Attrs) {
- for (unsigned i = 0, e = Record.readInt(); i != e; ++i) {
- Attr *New = nullptr;
- auto Kind = (attr::Kind)Record.readInt();
- SourceRange Range = Record.readSourceRange();
- ASTContext &Context = getContext();
+namespace {
+class AttrReader {
+ ModuleFile *F;
+ ASTReader *Reader;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
-#include "clang/Serialization/AttrPCHRead.inc"
+public:
+ AttrReader(ModuleFile &F, ASTReader &Reader,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : F(&F), Reader(&Reader), Record(Record), Idx(Idx) {}
- assert(New && "Unable to decode attribute?");
- Attrs.push_back(New);
+ const uint64_t &readInt() { return Record[Idx++]; }
+
+ SourceRange readSourceRange() {
+ return Reader->ReadSourceRange(*F, Record, Idx);
}
+
+ Expr *readExpr() { return Reader->ReadExpr(*F); }
+
+ std::string readString() {
+ return Reader->ReadString(Record, Idx);
+ }
+
+ TypeSourceInfo *getTypeSourceInfo() {
+ return Reader->GetTypeSourceInfo(*F, Record, Idx);
+ }
+
+ IdentifierInfo *getIdentifierInfo() {
+ return Reader->GetIdentifierInfo(*F, Record, Idx);
+ }
+
+ VersionTuple readVersionTuple() {
+ return ASTReader::ReadVersionTuple(Record, Idx);
+ }
+
+ template <typename T> T *GetLocalDeclAs(uint32_t LocalID) {
+ return cast_or_null<T>(Reader->GetLocalDecl(*F, LocalID));
+ }
+};
+} // namespace
+
+Attr *ASTReader::ReadAttr(ModuleFile &M, const RecordData &Rec,
+ unsigned &Idx) {
+ AttrReader Record(M, *this, Rec, Idx);
+ auto V = Record.readInt();
+ if (!V)
+ return nullptr;
+
+ Attr *New = nullptr;
+ // Kind is stored as a 1-based integer because 0 is used to indicate a null
+ // Attr pointer.
+ auto Kind = static_cast<attr::Kind>(V - 1);
+ SourceRange Range = Record.readSourceRange();
+ ASTContext &Context = getContext();
+
+#include "clang/Serialization/AttrPCHRead.inc"
+
+ assert(New && "Unable to decode attribute?");
+ return New;
+}
+
+/// Reads attributes from the current stream position.
+void ASTReader::ReadAttributes(ASTRecordReader &Record, AttrVec &Attrs) {
+ for (unsigned I = 0, E = Record.readInt(); I != E; ++I)
+ Attrs.push_back(Record.readAttr());
}
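
A short sketch of the encoding ReadAttr relies on, as stated in the comment above: the attribute kind is written as Kind + 1 so that a stored 0 can mean "no attribute". ToyAttrKind and the two helpers are hypothetical, not the Clang API.

    #include <cstdint>

    enum class ToyAttrKind : uint64_t { Aligned, Deprecated, Unused };

    // Writer side: shift the kind up by one so 0 stays free as the null marker.
    uint64_t encodeAttrKind(const ToyAttrKind *K) {
      return K ? static_cast<uint64_t>(*K) + 1 : 0;
    }

    // Reader side: a stored 0 yields "no attribute"; anything else is Kind - 1.
    bool decodeAttrKind(uint64_t V, ToyAttrKind &K) {
      if (V == 0)
        return false; // null attribute
      K = static_cast<ToyAttrKind>(V - 1);
      return true;
    }
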
//===----------------------------------------------------------------------===//
@@ -2702,7 +2781,8 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
return !D->getDeclContext()->isFunctionOrMethod();
if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
- Var->isThisDeclarationADefinition() == VarDecl::Definition;
+ (Var->isThisDeclarationADefinition() == VarDecl::Definition ||
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Var));
if (const auto *Func = dyn_cast<FunctionDecl>(D))
return Func->doesThisDeclarationHaveABody() || HasBody;
@@ -2832,35 +2912,29 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
// Note that pass_object_size attributes are represented in the function's
// ExtParameterInfo, so we don't need to check them here.
- SmallVector<const EnableIfAttr *, 4> AEnableIfs;
- // Since this is an equality check, we can ignore that enable_if attrs show up
- // in reverse order.
- for (const auto *EIA : A->specific_attrs<EnableIfAttr>())
- AEnableIfs.push_back(EIA);
-
- SmallVector<const EnableIfAttr *, 4> BEnableIfs;
- for (const auto *EIA : B->specific_attrs<EnableIfAttr>())
- BEnableIfs.push_back(EIA);
+ llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+ auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
+ auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
- // Two very common cases: either we have 0 enable_if attrs, or we have an
- // unequal number of enable_if attrs.
- if (AEnableIfs.empty() && BEnableIfs.empty())
- return true;
+ for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
+ Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
- if (AEnableIfs.size() != BEnableIfs.size())
- return false;
+ // Return false if the number of enable_if attributes is different.
+ if (!Cand1A || !Cand2A)
+ return false;
- llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- for (unsigned I = 0, E = AEnableIfs.size(); I != E; ++I) {
Cand1ID.clear();
Cand2ID.clear();
- AEnableIfs[I]->getCond()->Profile(Cand1ID, A->getASTContext(), true);
- BEnableIfs[I]->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+ (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+
+ // Return false if any of the enable_if expressions of A and B are
+ // different.
if (Cand1ID != Cand2ID)
return false;
}
-
return true;
}
@@ -3076,7 +3150,7 @@ DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
// we load the update record.
if (!DD) {
DD = new (Reader.getContext()) struct CXXRecordDecl::DefinitionData(RD);
- RD->IsCompleteDefinition = true;
+ RD->setCompleteDefinition(true);
RD->DefinitionData = DD;
RD->getCanonicalDecl()->DefinitionData = DD;
@@ -3368,6 +3442,11 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
}
}
+static bool isUndeducedReturnType(QualType T) {
+ auto *DT = T->getContainedDeducedType();
+ return DT && !DT->isDeduced();
+}
+
template<>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<FunctionDecl> *D,
@@ -3380,7 +3459,7 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
// If the previous declaration is an inline function declaration, then this
// declaration is too.
- if (PrevFD->IsInline != FD->IsInline) {
+ if (PrevFD->isInlined() != FD->isInlined()) {
// FIXME: [dcl.fct.spec]p4:
// If a function with external linkage is declared inline in one
// translation unit, it shall be declared inline in all translation
@@ -3396,20 +3475,29 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
// module C instantiates the definition of X<int>::f
//
// If module B and C are merged, we do not have a violation of this rule.
- FD->IsInline = true;
+ FD->setImplicitlyInline(true);
}
- // If we need to propagate an exception specification along the redecl
- // chain, make a note of that so that we can do so later.
auto *FPT = FD->getType()->getAs<FunctionProtoType>();
auto *PrevFPT = PrevFD->getType()->getAs<FunctionProtoType>();
if (FPT && PrevFPT) {
+ // If we need to propagate an exception specification along the redecl
+ // chain, make a note of that so that we can do so later.
bool IsUnresolved = isUnresolvedExceptionSpec(FPT->getExceptionSpecType());
bool WasUnresolved =
isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType());
if (IsUnresolved != WasUnresolved)
Reader.PendingExceptionSpecUpdates.insert(
- std::make_pair(Canon, IsUnresolved ? PrevFD : FD));
+ {Canon, IsUnresolved ? PrevFD : FD});
+
+ // If we need to propagate a deduced return type along the redecl chain,
+ // make a note of that so that we can do it later.
+ bool IsUndeduced = isUndeducedReturnType(FPT->getReturnType());
+ bool WasUndeduced = isUndeducedReturnType(PrevFPT->getReturnType());
+ if (IsUndeduced != WasUndeduced)
+ Reader.PendingDeducedTypeUpdates.insert(
+ {cast<FunctionDecl>(Canon),
+ (IsUndeduced ? PrevFPT : FPT)->getReturnType()});
}
}
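
A rough sketch, under assumed names (ToyFunction, ToyType, PendingDeducedTypes), of the "record now, apply later" pattern behind PendingDeducedTypeUpdates: while redeclaration chains are still being wired up, the reader only notes which canonical function needs which deduced return type, and a later pass applies all of the updates at once.

    #include <map>
    #include <string>

    struct ToyType { std::string Name; };
    struct ToyFunction {
      ToyFunction *Canonical = this;
      ToyType ReturnType;
    };

    // Deferred work keyed by the canonical declaration.
    using PendingDeducedTypes = std::map<ToyFunction *, ToyType>;

    void noteDeducedReturnType(PendingDeducedTypes &Pending, ToyFunction *FD,
                               ToyType Deduced) {
      // insert() keeps the first entry per canonical decl, much like a set.
      Pending.insert({FD->Canonical, Deduced});
    }

    void applyPendingDeducedTypes(PendingDeducedTypes &Pending) {
      for (auto &Entry : Pending)
        Entry.first->ReturnType = Entry.second; // a real pass updates every redecl
      Pending.clear();
    }
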
@@ -3760,6 +3848,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_OMP_THREADPRIVATE:
D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
+ case DECL_OMP_REQUIRES:
+ D = OMPRequiresDecl::CreateDeserialized(Context, ID, Record.readInt());
+ break;
case DECL_OMP_DECLARE_REDUCTION:
D = OMPDeclareReductionDecl::CreateDeserialized(Context, ID);
break;
@@ -4326,14 +4417,10 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
case UPD_CXX_DEDUCED_RETURN_TYPE: {
- // FIXME: Also do this when merging redecls.
+ auto *FD = cast<FunctionDecl>(D);
QualType DeducedResultType = Record.readType();
- for (auto *Redecl : merged_redecls(D)) {
- // FIXME: If the return type is already deduced, check that it matches.
- auto *FD = cast<FunctionDecl>(Redecl);
- Reader.getContext().adjustDeducedFunctionResultType(FD,
- DeducedResultType);
- }
+ Reader.PendingDeducedTypeUpdates.insert(
+ {FD->getCanonicalDecl(), DeducedResultType});
break;
}
@@ -4360,26 +4447,19 @@ void ASTDeclReader::UpdateDecl(Decl *D,
case UPD_DECL_EXPORTED: {
unsigned SubmoduleID = readSubmoduleID();
auto *Exported = cast<NamedDecl>(D);
- if (auto *TD = dyn_cast<TagDecl>(Exported))
- Exported = TD->getDefinition();
Module *Owner = SubmoduleID ? Reader.getSubmodule(SubmoduleID) : nullptr;
- if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
- Reader.getContext().mergeDefinitionIntoModule(cast<NamedDecl>(Exported),
- Owner);
- Reader.PendingMergedDefinitionsToDeduplicate.insert(
- cast<NamedDecl>(Exported));
- } else if (Owner && Owner->NameVisibility != Module::AllVisible) {
- // If Owner is made visible at some later point, make this declaration
- // visible too.
- Reader.HiddenNamesMap[Owner].push_back(Exported);
- } else {
- // The declaration is now visible.
- Exported->setVisibleDespiteOwningModule();
- }
+ Reader.getContext().mergeDefinitionIntoModule(Exported, Owner);
+ Reader.PendingMergedDefinitionsToDeduplicate.insert(Exported);
break;
}
case UPD_DECL_MARKED_OPENMP_DECLARETARGET:
+ D->addAttr(OMPDeclareTargetDeclAttr::CreateImplicit(
+ Reader.getContext(),
+ static_cast<OMPDeclareTargetDeclAttr::MapTypeTy>(Record.readInt()),
+ ReadSourceRange()));
+ break;
+
case UPD_ADDED_ATTR_TO_RECORD:
AttrVec Attrs;
Record.readAttributes(Attrs);
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index d9d780b25b31..60abea95bfaf 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -154,7 +154,7 @@ void ASTStmtReader::VisitStmt(Stmt *S) {
void ASTStmtReader::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
S->setSemiLoc(ReadSourceLocation());
- S->HasLeadingEmptyMacro = Record.readInt();
+ S->NullStmtBits.HasLeadingEmptyMacro = Record.readInt();
}
void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
@@ -164,7 +164,7 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
while (NumStmts--)
Stmts.push_back(Record.readSubStmt());
S->setStmts(Stmts);
- S->LBraceLoc = ReadSourceLocation();
+ S->CompoundStmtBits.LBraceLoc = ReadSourceLocation();
S->RBraceLoc = ReadSourceLocation();
}
@@ -177,10 +177,13 @@ void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
+ bool CaseStmtIsGNURange = Record.readInt();
S->setLHS(Record.readSubExpr());
- S->setRHS(Record.readSubExpr());
S->setSubStmt(Record.readSubStmt());
- S->setEllipsisLoc(ReadSourceLocation());
+ if (CaseStmtIsGNURange) {
+ S->setRHS(Record.readSubExpr());
+ S->setEllipsisLoc(ReadSourceLocation());
+ }
}
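
A hedged sketch of the flag-first pattern used above for CaseStmt: a boolean is serialized before the statement body, and the optional payload (here the GNU range RHS and its ellipsis location) only appears in the record when the flag is set, so the common case stores nothing extra. ToyWriter, ToyReader and ToyCase are hypothetical.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyWriter {
      std::vector<uint64_t> Out;
      void writeInt(uint64_t V) { Out.push_back(V); }
    };

    struct ToyReader {
      const std::vector<uint64_t> &In;
      std::size_t Idx = 0;
      uint64_t readInt() { return In[Idx++]; }
    };

    struct ToyCase {
      uint64_t LHS = 0;
      bool IsGNURange = false;
      uint64_t RHS = 0; // only meaningful when IsGNURange is true
    };

    void writeCase(ToyWriter &W, const ToyCase &C) {
      W.writeInt(C.IsGNURange); // flag first, so the reader knows what follows
      W.writeInt(C.LHS);
      if (C.IsGNURange)
        W.writeInt(C.RHS); // payload omitted entirely in the common case
    }

    ToyCase readCase(ToyReader &R) {
      ToyCase C;
      C.IsGNURange = R.readInt();
      C.LHS = R.readInt();
      if (C.IsGNURange)
        C.RHS = R.readInt();
      return C;
    }
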
void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
@@ -199,38 +202,59 @@ void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
VisitStmt(S);
+ // NumAttrs in AttributedStmt is set when creating an empty
+ // AttributedStmt in AttributedStmt::CreateEmpty, since it is needed
+ // to allocate the right amount of space for the trailing Attr *.
uint64_t NumAttrs = Record.readInt();
AttrVec Attrs;
Record.readAttributes(Attrs);
(void)NumAttrs;
- assert(NumAttrs == S->NumAttrs);
+ assert(NumAttrs == S->AttributedStmtBits.NumAttrs);
assert(NumAttrs == Attrs.size());
std::copy(Attrs.begin(), Attrs.end(), S->getAttrArrayPtr());
S->SubStmt = Record.readSubStmt();
- S->AttrLoc = ReadSourceLocation();
+ S->AttributedStmtBits.AttrLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
+
S->setConstexpr(Record.readInt());
- S->setInit(Record.readSubStmt());
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ bool HasElse = Record.readInt();
+ bool HasVar = Record.readInt();
+ bool HasInit = Record.readInt();
+
S->setCond(Record.readSubExpr());
S->setThen(Record.readSubStmt());
- S->setElse(Record.readSubStmt());
+ if (HasElse)
+ S->setElse(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ if (HasInit)
+ S->setInit(Record.readSubStmt());
+
S->setIfLoc(ReadSourceLocation());
- S->setElseLoc(ReadSourceLocation());
+ if (HasElse)
+ S->setElseLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- S->setInit(Record.readSubStmt());
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
+ bool HasInit = Record.readInt();
+ bool HasVar = Record.readInt();
+ bool AllEnumCasesCovered = Record.readInt();
+ if (AllEnumCasesCovered)
+ S->setAllEnumCasesCovered();
+
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
+ if (HasInit)
+ S->setInit(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
S->setSwitchLoc(ReadSourceLocation());
- if (Record.readInt())
- S->setAllEnumCasesCovered();
SwitchCase *PrevSC = nullptr;
for (auto E = Record.size(); Record.getIdx() != E; ) {
@@ -246,10 +270,14 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
+ bool HasVar = Record.readInt();
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
+ if (HasVar)
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+
S->setWhileLoc(ReadSourceLocation());
}
@@ -300,9 +328,14 @@ void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
+
+ bool HasNRVOCandidate = Record.readInt();
+
S->setRetValue(Record.readSubExpr());
+ if (HasNRVOCandidate)
+ S->setNRVOCandidate(ReadDeclAs<VarDecl>());
+
S->setReturnLoc(ReadSourceLocation());
- S->setNRVOCandidate(ReadDeclAs<VarDecl>());
}
void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
@@ -491,11 +524,19 @@ void ASTStmtReader::VisitExpr(Expr *E) {
"Incorrect expression field count");
}
+void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(Record.readSubExpr());
+}
+
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
+ bool HasFunctionName = Record.readInt();
+ E->PredefinedExprBits.HasFunctionName = HasFunctionName;
+ E->PredefinedExprBits.Kind = Record.readInt();
E->setLocation(ReadSourceLocation());
- E->Type = (PredefinedExpr::IdentType)Record.readInt();
- E->FnName = cast_or_null<StringLiteral>(Record.readSubExpr());
+ if (HasFunctionName)
+ E->setFunctionName(cast<StringLiteral>(Record.readSubExpr()));
}
void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -554,22 +595,35 @@ void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
- unsigned Len = Record.readInt();
- assert(Record.peekInt() == E->getNumConcatenated() &&
- "Wrong number of concatenated tokens!");
- Record.skipInts(1);
- auto kind = static_cast<StringLiteral::StringKind>(Record.readInt());
- bool isPascal = Record.readInt();
- // Read string data
- auto B = &Record.peekInt();
- SmallString<16> Str(B, B + Len);
- E->setString(Record.getContext(), Str, kind, isPascal);
- Record.skipInts(Len);
-
- // Read source locations
- for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ // NumConcatenated, Length and CharByteWidth are set by the empty
+ // ctor since they are needed to allocate storage for the trailing objects.
+ unsigned NumConcatenated = Record.readInt();
+ unsigned Length = Record.readInt();
+ unsigned CharByteWidth = Record.readInt();
+ assert((NumConcatenated == E->getNumConcatenated()) &&
+ "Wrong number of concatenated tokens!");
+ assert((Length == E->getLength()) && "Wrong Length!");
+ assert((CharByteWidth == E->getCharByteWidth()) && "Wrong character width!");
+ E->StringLiteralBits.Kind = Record.readInt();
+ E->StringLiteralBits.IsPascal = Record.readInt();
+
+ // The character width is originally computed via mapCharByteWidth.
+  // Check that the deserialized character width is consistent with the result
+ // of calling mapCharByteWidth.
+ assert((CharByteWidth ==
+ StringLiteral::mapCharByteWidth(Record.getContext().getTargetInfo(),
+ E->getKind())) &&
+ "Wrong character width!");
+
+ // Deserialize the trailing array of SourceLocation.
+ for (unsigned I = 0; I < NumConcatenated; ++I)
E->setStrTokenLoc(I, ReadSourceLocation());
+
+ // Deserialize the trailing array of char holding the string data.
+ char *StrData = E->getStrDataAsChar();
+ for (unsigned I = 0; I < Length * CharByteWidth; ++I)
+ StrData[I] = Record.readInt();
}
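
A simplified illustration (hypothetical ToyLiteral, not StringLiteral) of why the sizes are re-read and asserted here even though CreateEmpty already consumed them: the trailing storage was sized from those values, so the visitor can only sanity-check them and then fill the preallocated buffer in place.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyLiteral {
      unsigned Length;
      unsigned CharByteWidth;
      std::vector<char> Storage; // stands in for the trailing char array

      ToyLiteral(unsigned Len, unsigned Width)
          : Length(Len), CharByteWidth(Width), Storage(Len * Width) {}
    };

    void readLiteralBody(const std::vector<uint64_t> &Record, std::size_t &Idx,
                         ToyLiteral &L) {
      unsigned Length = Record[Idx++];
      unsigned CharByteWidth = Record[Idx++];
      assert(Length == L.Length && "size mismatch with CreateEmpty");
      assert(CharByteWidth == L.CharByteWidth && "width mismatch with CreateEmpty");
      for (unsigned I = 0; I != Length * CharByteWidth; ++I)
        L.Storage[I] = static_cast<char>(Record[Idx++]); // one unit per record int
    }
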
void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
@@ -589,10 +643,9 @@ void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
VisitExpr(E);
unsigned NumExprs = Record.readInt();
- E->Exprs = new (Record.getContext()) Stmt*[NumExprs];
- for (unsigned i = 0; i != NumExprs; ++i)
- E->Exprs[i] = Record.readSubStmt();
- E->NumExprs = NumExprs;
+ assert((NumExprs == E->getNumExprs()) && "Wrong NumExprs!");
+ for (unsigned I = 0; I != NumExprs; ++I)
+ E->getTrailingObjects<Stmt *>()[I] = Record.readSubStmt();
E->LParenLoc = ReadSourceLocation();
E->RParenLoc = ReadSourceLocation();
}
@@ -678,11 +731,13 @@ void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
- E->setNumArgs(Record.getContext(), Record.readInt());
+ unsigned NumArgs = Record.readInt();
+ assert((NumArgs == E->getNumArgs()) && "Wrong NumArgs!");
E->setRParenLoc(ReadSourceLocation());
E->setCallee(Record.readSubExpr());
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ for (unsigned I = 0; I != NumArgs; ++I)
E->setArg(I, Record.readSubExpr());
+ E->setADLCallKind(static_cast<CallExpr::ADLCallKind>(Record.readInt()));
}
void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
@@ -1270,6 +1325,7 @@ void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
S->CoawaitLoc = ReadSourceLocation();
S->ColonLoc = ReadSourceLocation();
S->RParenLoc = ReadSourceLocation();
+ S->setInit(Record.readSubStmt());
S->setRangeStmt(Record.readSubStmt());
S->setBeginStmt(Record.readSubStmt());
S->setEndStmt(Record.readSubStmt());
@@ -1290,27 +1346,29 @@ void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
- E->Operator = (OverloadedOperatorKind)Record.readInt();
+ E->CXXOperatorCallExprBits.OperatorKind = Record.readInt();
+ E->CXXOperatorCallExprBits.FPFeatures = Record.readInt();
E->Range = Record.readSourceRange();
- E->setFPFeatures(FPOptions(Record.readInt()));
}
void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
VisitExpr(E);
- E->NumArgs = Record.readInt();
- if (E->NumArgs)
- E->Args = new (Record.getContext()) Stmt*[E->NumArgs];
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, Record.readSubExpr());
- E->setConstructor(ReadDeclAs<CXXConstructorDecl>());
- E->setLocation(ReadSourceLocation());
- E->setElidable(Record.readInt());
- E->setHadMultipleCandidates(Record.readInt());
- E->setListInitialization(Record.readInt());
- E->setStdInitListInitialization(Record.readInt());
- E->setRequiresZeroInitialization(Record.readInt());
- E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record.readInt());
+
+ unsigned NumArgs = Record.readInt();
+ assert((NumArgs == E->getNumArgs()) && "Wrong NumArgs!");
+
+ E->CXXConstructExprBits.Elidable = Record.readInt();
+ E->CXXConstructExprBits.HadMultipleCandidates = Record.readInt();
+ E->CXXConstructExprBits.ListInitialization = Record.readInt();
+ E->CXXConstructExprBits.StdInitListInitialization = Record.readInt();
+ E->CXXConstructExprBits.ZeroInitialization = Record.readInt();
+ E->CXXConstructExprBits.ConstructionKind = Record.readInt();
+ E->CXXConstructExprBits.Loc = ReadSourceLocation();
+ E->Constructor = ReadDeclAs<CXXConstructorDecl>();
E->ParenOrBraceRange = ReadSourceRange();
+
+ for (unsigned I = 0; I != NumArgs; ++I)
+ E->setArg(I, Record.readSubExpr());
}
void ASTStmtReader::VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E) {
@@ -1323,7 +1381,7 @@ void ASTStmtReader::VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E) {
void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
VisitCXXConstructExpr(E);
- E->Type = GetTypeSourceInfo();
+ E->TSI = GetTypeSourceInfo();
}
void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
@@ -1418,21 +1476,21 @@ void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
- E->ThrowLoc = ReadSourceLocation();
- E->Op = Record.readSubExpr();
- E->IsThrownVariableInScope = Record.readInt();
+ E->CXXThrowExprBits.ThrowLoc = ReadSourceLocation();
+ E->Operand = Record.readSubExpr();
+ E->CXXThrowExprBits.IsThrownVariableInScope = Record.readInt();
}
void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
E->Param = ReadDeclAs<ParmVarDecl>();
- E->Loc = ReadSourceLocation();
+ E->CXXDefaultArgExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
E->Field = ReadDeclAs<FieldDecl>();
- E->Loc = ReadSourceLocation();
+ E->CXXDefaultInitExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -1444,42 +1502,55 @@ void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
E->TypeInfo = GetTypeSourceInfo();
- E->RParenLoc = ReadSourceLocation();
+ E->CXXScalarValueInitExprBits.RParenLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
- E->GlobalNew = Record.readInt();
- bool isArray = Record.readInt();
- E->PassAlignment = Record.readInt();
- E->UsualArrayDeleteWantsSize = Record.readInt();
+
+ bool IsArray = Record.readInt();
+ bool HasInit = Record.readInt();
unsigned NumPlacementArgs = Record.readInt();
- E->StoredInitializationStyle = Record.readInt();
+ bool IsParenTypeId = Record.readInt();
+
+ E->CXXNewExprBits.IsGlobalNew = Record.readInt();
+ E->CXXNewExprBits.ShouldPassAlignment = Record.readInt();
+ E->CXXNewExprBits.UsualArrayDeleteWantsSize = Record.readInt();
+ E->CXXNewExprBits.StoredInitializationStyle = Record.readInt();
+
+ assert((IsArray == E->isArray()) && "Wrong IsArray!");
+ assert((HasInit == E->hasInitializer()) && "Wrong HasInit!");
+ assert((NumPlacementArgs == E->getNumPlacementArgs()) &&
+ "Wrong NumPlacementArgs!");
+ assert((IsParenTypeId == E->isParenTypeId()) && "Wrong IsParenTypeId!");
+ (void)IsArray;
+ (void)HasInit;
+ (void)NumPlacementArgs;
+
E->setOperatorNew(ReadDeclAs<FunctionDecl>());
E->setOperatorDelete(ReadDeclAs<FunctionDecl>());
E->AllocatedTypeInfo = GetTypeSourceInfo();
- E->TypeIdParens = ReadSourceRange();
+ if (IsParenTypeId)
+ E->getTrailingObjects<SourceRange>()[0] = ReadSourceRange();
E->Range = ReadSourceRange();
E->DirectInitRange = ReadSourceRange();
- E->AllocateArgsArray(Record.getContext(), isArray, NumPlacementArgs,
- E->StoredInitializationStyle != 0);
-
// Install all the subexpressions.
- for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
- I != e; ++I)
+ for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),
+ N = E->raw_arg_end();
+ I != N; ++I)
*I = Record.readSubStmt();
}
void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
VisitExpr(E);
- E->GlobalDelete = Record.readInt();
- E->ArrayForm = Record.readInt();
- E->ArrayFormAsWritten = Record.readInt();
- E->UsualArrayDeleteWantsSize = Record.readInt();
+ E->CXXDeleteExprBits.GlobalDelete = Record.readInt();
+ E->CXXDeleteExprBits.ArrayForm = Record.readInt();
+ E->CXXDeleteExprBits.ArrayFormAsWritten = Record.readInt();
+ E->CXXDeleteExprBits.UsualArrayDeleteWantsSize = Record.readInt();
E->OperatorDelete = ReadDeclAs<FunctionDecl>();
E->Argument = Record.readSubExpr();
- E->Loc = ReadSourceLocation();
+ E->CXXDeleteExprBits.Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
@@ -1513,22 +1584,37 @@ void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
E->SubExpr = Record.readSubExpr();
}
-void
-ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+void ASTStmtReader::VisitCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *E) {
VisitExpr(E);
- if (Record.readInt()) // HasTemplateKWAndArgsInfo
+ bool HasTemplateKWAndArgsInfo = Record.readInt();
+ unsigned NumTemplateArgs = Record.readInt();
+ bool HasFirstQualifierFoundInScope = Record.readInt();
+
+ assert((HasTemplateKWAndArgsInfo == E->hasTemplateKWAndArgsInfo()) &&
+ "Wrong HasTemplateKWAndArgsInfo!");
+ assert(
+ (HasFirstQualifierFoundInScope == E->hasFirstQualifierFoundInScope()) &&
+ "Wrong HasFirstQualifierFoundInScope!");
+
+ if (HasTemplateKWAndArgsInfo)
ReadTemplateKWAndArgsInfo(
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
- E->getTrailingObjects<TemplateArgumentLoc>(),
- /*NumTemplateArgs=*/Record.readInt());
+ E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
- E->Base = Record.readSubExpr();
+ assert((NumTemplateArgs == E->getNumTemplateArgs()) &&
+ "Wrong NumTemplateArgs!");
+
+ E->CXXDependentScopeMemberExprBits.IsArrow = Record.readInt();
+ E->CXXDependentScopeMemberExprBits.OperatorLoc = ReadSourceLocation();
E->BaseType = Record.readType();
- E->IsArrow = Record.readInt();
- E->OperatorLoc = ReadSourceLocation();
E->QualifierLoc = Record.readNestedNameSpecifierLoc();
- E->FirstQualifierFoundInScope = ReadDeclAs<NamedDecl>();
+ E->Base = Record.readSubExpr();
+
+ if (HasFirstQualifierFoundInScope)
+ *E->getTrailingObjects<NamedDecl *>() = ReadDeclAs<NamedDecl>();
+
ReadDeclarationNameInfo(E->MemberNameInfo);
}
@@ -1554,7 +1640,7 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
Record.skipInts(1);
for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
E->setArg(I, Record.readSubExpr());
- E->Type = GetTypeSourceInfo();
+ E->TSI = GetTypeSourceInfo();
E->setLParenLoc(ReadSourceLocation());
E->setRParenLoc(ReadSourceLocation());
}
@@ -1562,19 +1648,33 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
- if (Record.readInt()) // HasTemplateKWAndArgsInfo
+ unsigned NumResults = Record.readInt();
+ bool HasTemplateKWAndArgsInfo = Record.readInt();
+ assert((E->getNumDecls() == NumResults) && "Wrong NumResults!");
+ assert((E->hasTemplateKWAndArgsInfo() == HasTemplateKWAndArgsInfo) &&
+ "Wrong HasTemplateKWAndArgsInfo!");
+
+ if (HasTemplateKWAndArgsInfo) {
+ unsigned NumTemplateArgs = Record.readInt();
ReadTemplateKWAndArgsInfo(*E->getTrailingASTTemplateKWAndArgsInfo(),
E->getTrailingTemplateArgumentLoc(),
- /*NumTemplateArgs=*/Record.readInt());
+ NumTemplateArgs);
+ assert((E->getNumTemplateArgs() == NumTemplateArgs) &&
+ "Wrong NumTemplateArgs!");
+ }
- unsigned NumDecls = Record.readInt();
UnresolvedSet<8> Decls;
- for (unsigned i = 0; i != NumDecls; ++i) {
+ for (unsigned I = 0; I != NumResults; ++I) {
auto *D = ReadDeclAs<NamedDecl>();
auto AS = (AccessSpecifier)Record.readInt();
Decls.addDecl(D, AS);
}
- E->initializeResults(Record.getContext(), Decls.begin(), Decls.end());
+
+ DeclAccessPair *Results = E->getTrailingResults();
+ UnresolvedSetIterator Iter = Decls.begin();
+  for (unsigned I = 0; I != NumResults; ++I)
+    Results[I] = (Iter + I).getPair();
ReadDeclarationNameInfo(E->NameInfo);
E->QualifierLoc = Record.readNestedNameSpecifierLoc();
@@ -1582,8 +1682,8 @@ void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
VisitOverloadExpr(E);
- E->IsArrow = Record.readInt();
- E->HasUnresolvedUsing = Record.readInt();
+ E->UnresolvedMemberExprBits.IsArrow = Record.readInt();
+ E->UnresolvedMemberExprBits.HasUnresolvedUsing = Record.readInt();
E->Base = Record.readSubExpr();
E->BaseType = Record.readType();
E->OperatorLoc = ReadSourceLocation();
@@ -1591,8 +1691,8 @@ void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
- E->RequiresADL = Record.readInt();
- E->Overloaded = Record.readInt();
+ E->UnresolvedLookupExprBits.RequiresADL = Record.readInt();
+ E->UnresolvedLookupExprBits.Overloaded = Record.readInt();
E->NamingClass = ReadDeclAs<CXXRecordDecl>();
}
@@ -1633,7 +1733,7 @@ void ASTStmtReader::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
void ASTStmtReader::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
VisitExpr(E);
- E->Value = (bool)Record.readInt();
+ E->CXXNoexceptExprBits.Value = Record.readInt();
E->Range = ReadSourceRange();
E->Operand = Record.readSubExpr();
}
@@ -1667,7 +1767,7 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
E->Param = ReadDeclAs<NonTypeTemplateParmDecl>();
- E->NameLoc = ReadSourceLocation();
+ E->SubstNonTypeTemplateParmExprBits.NameLoc = ReadSourceLocation();
E->Replacement = Record.readSubExpr();
}
@@ -1715,7 +1815,7 @@ void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
E->SourceExpr = Record.readSubExpr();
- E->Loc = ReadSourceLocation();
+ E->OpaqueValueExprBits.Loc = ReadSourceLocation();
E->setIsUnique(Record.readInt());
}
@@ -1803,895 +1903,13 @@ void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
}
//===----------------------------------------------------------------------===//
-// OpenMP Clauses.
-//===----------------------------------------------------------------------===//
-
-namespace clang {
-
-class OMPClauseReader : public OMPClauseVisitor<OMPClauseReader> {
- ASTStmtReader *Reader;
- ASTContext &Context;
-
-public:
- OMPClauseReader(ASTStmtReader *R, ASTRecordReader &Record)
- : Reader(R), Context(Record.getContext()) {}
-
-#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *C);
-#include "clang/Basic/OpenMPKinds.def"
- OMPClause *readClause();
- void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
- void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
-};
-
-} // namespace clang
-
-OMPClause *OMPClauseReader::readClause() {
- OMPClause *C;
- switch (Reader->Record.readInt()) {
- case OMPC_if:
- C = new (Context) OMPIfClause();
- break;
- case OMPC_final:
- C = new (Context) OMPFinalClause();
- break;
- case OMPC_num_threads:
- C = new (Context) OMPNumThreadsClause();
- break;
- case OMPC_safelen:
- C = new (Context) OMPSafelenClause();
- break;
- case OMPC_simdlen:
- C = new (Context) OMPSimdlenClause();
- break;
- case OMPC_collapse:
- C = new (Context) OMPCollapseClause();
- break;
- case OMPC_default:
- C = new (Context) OMPDefaultClause();
- break;
- case OMPC_proc_bind:
- C = new (Context) OMPProcBindClause();
- break;
- case OMPC_schedule:
- C = new (Context) OMPScheduleClause();
- break;
- case OMPC_ordered:
- C = new (Context) OMPOrderedClause();
- break;
- case OMPC_nowait:
- C = new (Context) OMPNowaitClause();
- break;
- case OMPC_untied:
- C = new (Context) OMPUntiedClause();
- break;
- case OMPC_mergeable:
- C = new (Context) OMPMergeableClause();
- break;
- case OMPC_read:
- C = new (Context) OMPReadClause();
- break;
- case OMPC_write:
- C = new (Context) OMPWriteClause();
- break;
- case OMPC_update:
- C = new (Context) OMPUpdateClause();
- break;
- case OMPC_capture:
- C = new (Context) OMPCaptureClause();
- break;
- case OMPC_seq_cst:
- C = new (Context) OMPSeqCstClause();
- break;
- case OMPC_threads:
- C = new (Context) OMPThreadsClause();
- break;
- case OMPC_simd:
- C = new (Context) OMPSIMDClause();
- break;
- case OMPC_nogroup:
- C = new (Context) OMPNogroupClause();
- break;
- case OMPC_private:
- C = OMPPrivateClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_firstprivate:
- C = OMPFirstprivateClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_lastprivate:
- C = OMPLastprivateClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_shared:
- C = OMPSharedClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_reduction:
- C = OMPReductionClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_task_reduction:
- C = OMPTaskReductionClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_in_reduction:
- C = OMPInReductionClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_linear:
- C = OMPLinearClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_aligned:
- C = OMPAlignedClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_copyin:
- C = OMPCopyinClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_copyprivate:
- C = OMPCopyprivateClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_flush:
- C = OMPFlushClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_depend:
- C = OMPDependClause::CreateEmpty(Context, Reader->Record.readInt());
- break;
- case OMPC_device:
- C = new (Context) OMPDeviceClause();
- break;
- case OMPC_map: {
- unsigned NumVars = Reader->Record.readInt();
- unsigned NumDeclarations = Reader->Record.readInt();
- unsigned NumLists = Reader->Record.readInt();
- unsigned NumComponents = Reader->Record.readInt();
- C = OMPMapClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
- break;
- }
- case OMPC_num_teams:
- C = new (Context) OMPNumTeamsClause();
- break;
- case OMPC_thread_limit:
- C = new (Context) OMPThreadLimitClause();
- break;
- case OMPC_priority:
- C = new (Context) OMPPriorityClause();
- break;
- case OMPC_grainsize:
- C = new (Context) OMPGrainsizeClause();
- break;
- case OMPC_num_tasks:
- C = new (Context) OMPNumTasksClause();
- break;
- case OMPC_hint:
- C = new (Context) OMPHintClause();
- break;
- case OMPC_dist_schedule:
- C = new (Context) OMPDistScheduleClause();
- break;
- case OMPC_defaultmap:
- C = new (Context) OMPDefaultmapClause();
- break;
- case OMPC_to: {
- unsigned NumVars = Reader->Record.readInt();
- unsigned NumDeclarations = Reader->Record.readInt();
- unsigned NumLists = Reader->Record.readInt();
- unsigned NumComponents = Reader->Record.readInt();
- C = OMPToClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
- break;
- }
- case OMPC_from: {
- unsigned NumVars = Reader->Record.readInt();
- unsigned NumDeclarations = Reader->Record.readInt();
- unsigned NumLists = Reader->Record.readInt();
- unsigned NumComponents = Reader->Record.readInt();
- C = OMPFromClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
- NumComponents);
- break;
- }
- case OMPC_use_device_ptr: {
- unsigned NumVars = Reader->Record.readInt();
- unsigned NumDeclarations = Reader->Record.readInt();
- unsigned NumLists = Reader->Record.readInt();
- unsigned NumComponents = Reader->Record.readInt();
- C = OMPUseDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
- NumLists, NumComponents);
- break;
- }
- case OMPC_is_device_ptr: {
- unsigned NumVars = Reader->Record.readInt();
- unsigned NumDeclarations = Reader->Record.readInt();
- unsigned NumLists = Reader->Record.readInt();
- unsigned NumComponents = Reader->Record.readInt();
- C = OMPIsDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
- NumLists, NumComponents);
- break;
- }
- }
- Visit(C);
- C->setLocStart(Reader->ReadSourceLocation());
- C->setLocEnd(Reader->ReadSourceLocation());
-
- return C;
-}
-
-void OMPClauseReader::VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C) {
- C->setPreInitStmt(Reader->Record.readSubStmt(),
- static_cast<OpenMPDirectiveKind>(Reader->Record.readInt()));
-}
-
-void OMPClauseReader::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
- VisitOMPClauseWithPreInit(C);
- C->setPostUpdateExpr(Reader->Record.readSubExpr());
-}
-
-void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setNameModifier(static_cast<OpenMPDirectiveKind>(Reader->Record.readInt()));
- C->setNameModifierLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- C->setCondition(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPFinalClause(OMPFinalClause *C) {
- C->setCondition(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setNumThreads(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
- C->setSafelen(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
- C->setSimdlen(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
- C->setNumForLoops(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
- C->setDefaultKind(
- static_cast<OpenMPDefaultClauseKind>(Reader->Record.readInt()));
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setDefaultKindKwLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
- C->setProcBindKind(
- static_cast<OpenMPProcBindClauseKind>(Reader->Record.readInt()));
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setProcBindKindKwLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setScheduleKind(
- static_cast<OpenMPScheduleClauseKind>(Reader->Record.readInt()));
- C->setFirstScheduleModifier(
- static_cast<OpenMPScheduleClauseModifier>(Reader->Record.readInt()));
- C->setSecondScheduleModifier(
- static_cast<OpenMPScheduleClauseModifier>(Reader->Record.readInt()));
- C->setChunkSize(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setFirstScheduleModifierLoc(Reader->ReadSourceLocation());
- C->setSecondScheduleModifierLoc(Reader->ReadSourceLocation());
- C->setScheduleKindLoc(Reader->ReadSourceLocation());
- C->setCommaLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
- C->setNumForLoops(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
-
-void OMPClauseReader::VisitOMPUntiedClause(OMPUntiedClause *) {}
-
-void OMPClauseReader::VisitOMPMergeableClause(OMPMergeableClause *) {}
-
-void OMPClauseReader::VisitOMPReadClause(OMPReadClause *) {}
-
-void OMPClauseReader::VisitOMPWriteClause(OMPWriteClause *) {}
-
-void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *) {}
-
-void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
-
-void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
-
-void OMPClauseReader::VisitOMPThreadsClause(OMPThreadsClause *) {}
-
-void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
-
-void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
-
-void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivateCopies(Vars);
-}
-
-void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivateCopies(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setInits(Vars);
-}
-
-void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
- VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivateCopies(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setSourceExprs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setDestinationExprs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setAssignmentOps(Vars);
-}
-
-void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
-}
-
-void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
- VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- NestedNameSpecifierLoc NNSL = Reader->Record.readNestedNameSpecifierLoc();
- DeclarationNameInfo DNI;
- Reader->ReadDeclarationNameInfo(DNI);
- C->setQualifierLoc(NNSL);
- C->setNameInfo(DNI);
-
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivates(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setLHSExprs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setRHSExprs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setReductionOps(Vars);
-}
-
-void OMPClauseReader::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
- VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- NestedNameSpecifierLoc NNSL = Reader->Record.readNestedNameSpecifierLoc();
- DeclarationNameInfo DNI;
- Reader->ReadDeclarationNameInfo(DNI);
- C->setQualifierLoc(NNSL);
- C->setNameInfo(DNI);
-
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivates(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setLHSExprs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setRHSExprs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setReductionOps(Vars);
-}
-
-void OMPClauseReader::VisitOMPInReductionClause(OMPInReductionClause *C) {
- VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- NestedNameSpecifierLoc NNSL = Reader->Record.readNestedNameSpecifierLoc();
- DeclarationNameInfo DNI;
- Reader->ReadDeclarationNameInfo(DNI);
- C->setQualifierLoc(NNSL);
- C->setNameInfo(DNI);
-
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivates(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setLHSExprs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setRHSExprs(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setReductionOps(Vars);
- Vars.clear();
- for (unsigned I = 0; I != NumVars; ++I)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setTaskgroupDescriptors(Vars);
-}
-
-void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
- VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- C->setModifier(static_cast<OpenMPLinearClauseKind>(Reader->Record.readInt()));
- C->setModifierLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivates(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setInits(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setUpdates(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setFinals(Vars);
- C->setStep(Reader->Record.readSubExpr());
- C->setCalcStep(Reader->Record.readSubExpr());
-}
-
-void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- C->setAlignment(Reader->Record.readSubExpr());
-}
-
-void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Exprs;
- Exprs.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setSourceExprs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setDestinationExprs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setAssignmentOps(Exprs);
-}
-
-void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Exprs;
- Exprs.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setSourceExprs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setDestinationExprs(Exprs);
- Exprs.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Record.readSubExpr());
- C->setAssignmentOps(Exprs);
-}
-
-void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
-}
-
-void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setDependencyKind(
- static_cast<OpenMPDependClauseKind>(Reader->Record.readInt()));
- C->setDependencyLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- unsigned NumVars = C->varlist_size();
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- C->setCounterValue(Reader->Record.readSubExpr());
-}
-
-void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setDevice(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setMapTypeModifier(
- static_cast<OpenMPMapClauseKind>(Reader->Record.readInt()));
- C->setMapType(
- static_cast<OpenMPMapClauseKind>(Reader->Record.readInt()));
- C->setMapLoc(Reader->ReadSourceLocation());
- C->setColonLoc(Reader->ReadSourceLocation());
- auto NumVars = C->varlist_size();
- auto UniqueDecls = C->getUniqueDeclarationsNum();
- auto TotalLists = C->getTotalComponentListNum();
- auto TotalComponents = C->getTotalComponentsNum();
-
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
-
- SmallVector<ValueDecl *, 16> Decls;
- Decls.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
- C->setUniqueDecls(Decls);
-
- SmallVector<unsigned, 16> ListsPerDecl;
- ListsPerDecl.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Reader->Record.readInt());
- C->setDeclNumLists(ListsPerDecl);
-
- SmallVector<unsigned, 32> ListSizes;
- ListSizes.reserve(TotalLists);
- for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Reader->Record.readInt());
- C->setComponentListSizes(ListSizes);
-
- SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
- Components.reserve(TotalComponents);
- for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Record.readSubExpr();
- auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
- }
- C->setComponents(Components, ListSizes);
-}
-
-void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setNumTeams(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setThreadLimit(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
- C->setPriority(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
- C->setGrainsize(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
- C->setNumTasks(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPHintClause(OMPHintClause *C) {
- C->setHint(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPDistScheduleClause(OMPDistScheduleClause *C) {
- VisitOMPClauseWithPreInit(C);
- C->setDistScheduleKind(
- static_cast<OpenMPDistScheduleClauseKind>(Reader->Record.readInt()));
- C->setChunkSize(Reader->Record.readSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setDistScheduleKindLoc(Reader->ReadSourceLocation());
- C->setCommaLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
- C->setDefaultmapKind(
- static_cast<OpenMPDefaultmapClauseKind>(Reader->Record.readInt()));
- C->setDefaultmapModifier(
- static_cast<OpenMPDefaultmapClauseModifier>(Reader->Record.readInt()));
- C->setLParenLoc(Reader->ReadSourceLocation());
- C->setDefaultmapModifierLoc(Reader->ReadSourceLocation());
- C->setDefaultmapKindLoc(Reader->ReadSourceLocation());
-}
-
-void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- auto NumVars = C->varlist_size();
- auto UniqueDecls = C->getUniqueDeclarationsNum();
- auto TotalLists = C->getTotalComponentListNum();
- auto TotalComponents = C->getTotalComponentsNum();
-
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
-
- SmallVector<ValueDecl *, 16> Decls;
- Decls.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
- C->setUniqueDecls(Decls);
-
- SmallVector<unsigned, 16> ListsPerDecl;
- ListsPerDecl.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Reader->Record.readInt());
- C->setDeclNumLists(ListsPerDecl);
-
- SmallVector<unsigned, 32> ListSizes;
- ListSizes.reserve(TotalLists);
- for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Reader->Record.readInt());
- C->setComponentListSizes(ListSizes);
-
- SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
- Components.reserve(TotalComponents);
- for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Record.readSubExpr();
- auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
- }
- C->setComponents(Components, ListSizes);
-}
-
-void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- auto NumVars = C->varlist_size();
- auto UniqueDecls = C->getUniqueDeclarationsNum();
- auto TotalLists = C->getTotalComponentListNum();
- auto TotalComponents = C->getTotalComponentsNum();
-
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
-
- SmallVector<ValueDecl *, 16> Decls;
- Decls.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
- C->setUniqueDecls(Decls);
-
- SmallVector<unsigned, 16> ListsPerDecl;
- ListsPerDecl.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Reader->Record.readInt());
- C->setDeclNumLists(ListsPerDecl);
-
- SmallVector<unsigned, 32> ListSizes;
- ListSizes.reserve(TotalLists);
- for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Reader->Record.readInt());
- C->setComponentListSizes(ListSizes);
-
- SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
- Components.reserve(TotalComponents);
- for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Record.readSubExpr();
- auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
- }
- C->setComponents(Components, ListSizes);
-}
-
-void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- auto NumVars = C->varlist_size();
- auto UniqueDecls = C->getUniqueDeclarationsNum();
- auto TotalLists = C->getTotalComponentListNum();
- auto TotalComponents = C->getTotalComponentsNum();
-
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setPrivateCopies(Vars);
- Vars.clear();
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setInits(Vars);
-
- SmallVector<ValueDecl *, 16> Decls;
- Decls.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
- C->setUniqueDecls(Decls);
-
- SmallVector<unsigned, 16> ListsPerDecl;
- ListsPerDecl.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Reader->Record.readInt());
- C->setDeclNumLists(ListsPerDecl);
-
- SmallVector<unsigned, 32> ListSizes;
- ListSizes.reserve(TotalLists);
- for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Reader->Record.readInt());
- C->setComponentListSizes(ListSizes);
-
- SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
- Components.reserve(TotalComponents);
- for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Record.readSubExpr();
- auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
- }
- C->setComponents(Components, ListSizes);
-}
-
-void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation());
- auto NumVars = C->varlist_size();
- auto UniqueDecls = C->getUniqueDeclarationsNum();
- auto TotalLists = C->getTotalComponentListNum();
- auto TotalComponents = C->getTotalComponentsNum();
-
- SmallVector<Expr *, 16> Vars;
- Vars.reserve(NumVars);
- for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Record.readSubExpr());
- C->setVarRefs(Vars);
- Vars.clear();
-
- SmallVector<ValueDecl *, 16> Decls;
- Decls.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
- C->setUniqueDecls(Decls);
-
- SmallVector<unsigned, 16> ListsPerDecl;
- ListsPerDecl.reserve(UniqueDecls);
- for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Reader->Record.readInt());
- C->setDeclNumLists(ListsPerDecl);
-
- SmallVector<unsigned, 32> ListSizes;
- ListSizes.reserve(TotalLists);
- for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Reader->Record.readInt());
- C->setComponentListSizes(ListSizes);
-
- SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
- Components.reserve(TotalComponents);
- for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Record.readSubExpr();
- auto *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
- Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
- AssociatedExpr, AssociatedDecl));
- }
- C->setComponents(Components, ListSizes);
-}
-
-//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
E->setLocStart(ReadSourceLocation());
E->setLocEnd(ReadSourceLocation());
- OMPClauseReader ClauseReader(this, Record);
+ OMPClauseReader ClauseReader(Record);
SmallVector<OMPClause *, 5> Clauses;
for (unsigned i = 0; i < E->getNumClauses(); ++i)
Clauses.push_back(ClauseReader.readClause());
@@ -2737,6 +1955,8 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
D->setCombinedCond(Record.readSubExpr());
D->setCombinedNextLowerBound(Record.readSubExpr());
D->setCombinedNextUpperBound(Record.readSubExpr());
+ D->setCombinedDistCond(Record.readSubExpr());
+ D->setCombinedParForInDistCond(Record.readSubExpr());
}
SmallVector<Expr *, 4> Sub;
unsigned CollapsedNum = D->getCollapsedNumber();
@@ -3142,7 +2362,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_CASE:
- S = new (Context) CaseStmt(Empty);
+ S = CaseStmt::CreateEmpty(
+ Context,
+ /*CaseStmtIsGNURange*/ Record[ASTStmtReader::NumStmtFields + 3]);
break;
case STMT_DEFAULT:
@@ -3160,15 +2382,24 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_IF:
- S = new (Context) IfStmt(Empty);
+ S = IfStmt::CreateEmpty(
+ Context,
+ /* HasElse=*/Record[ASTStmtReader::NumStmtFields + 1],
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields + 2],
+ /* HasInit=*/Record[ASTStmtReader::NumStmtFields + 3]);
break;
case STMT_SWITCH:
- S = new (Context) SwitchStmt(Empty);
+ S = SwitchStmt::CreateEmpty(
+ Context,
+ /* HasInit=*/Record[ASTStmtReader::NumStmtFields],
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields + 1]);
break;
case STMT_WHILE:
- S = new (Context) WhileStmt(Empty);
+ S = WhileStmt::CreateEmpty(
+ Context,
+ /* HasVar=*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_DO:
@@ -3196,7 +2427,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_RETURN:
- S = new (Context) ReturnStmt(Empty);
+ S = ReturnStmt::CreateEmpty(
+ Context, /* HasNRVOCandidate=*/Record[ASTStmtReader::NumStmtFields]);
break;
case STMT_DECL:
@@ -3212,12 +2444,18 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_CAPTURED:
- S = CapturedStmt::CreateDeserialized(Context,
- Record[ASTStmtReader::NumStmtFields]);
+ S = CapturedStmt::CreateDeserialized(
+ Context, Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case EXPR_CONSTANT:
+ S = new (Context) ConstantExpr(Empty);
break;
case EXPR_PREDEFINED:
- S = new (Context) PredefinedExpr(Empty);
+ S = PredefinedExpr::CreateEmpty(
+ Context,
+ /*HasFunctionName*/ Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_DECL_REF:
@@ -3243,8 +2481,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_STRING_LITERAL:
- S = StringLiteral::CreateEmpty(Context,
- Record[ASTStmtReader::NumExprFields + 1]);
+ S = StringLiteral::CreateEmpty(
+ Context,
+ /* NumConcatenated=*/Record[ASTStmtReader::NumExprFields],
+ /* Length=*/Record[ASTStmtReader::NumExprFields + 1],
+ /* CharByteWidth=*/Record[ASTStmtReader::NumExprFields + 2]);
break;
case EXPR_CHARACTER_LITERAL:
@@ -3256,7 +2497,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_PAREN_LIST:
- S = new (Context) ParenListExpr(Empty);
+ S = ParenListExpr::CreateEmpty(
+ Context,
+ /* NumExprs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_UNARY_OPERATOR:
@@ -3282,7 +2525,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CALL:
- S = new (Context) CallExpr(Context, Stmt::CallExprClass, Empty);
+ S = CallExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_MEMBER: {
@@ -3872,15 +3116,19 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
}
case EXPR_CXX_OPERATOR_CALL:
- S = new (Context) CXXOperatorCallExpr(Context, Empty);
+ S = CXXOperatorCallExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_CXX_MEMBER_CALL:
- S = new (Context) CXXMemberCallExpr(Context, Empty);
+ S = CXXMemberCallExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_CXX_CONSTRUCT:
- S = new (Context) CXXConstructExpr(Empty);
+ S = CXXConstructExpr::CreateEmpty(
+ Context,
+ /* NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CXX_INHERITED_CTOR_INIT:
@@ -3888,7 +3136,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_TEMPORARY_OBJECT:
- S = new (Context) CXXTemporaryObjectExpr(Empty);
+ S = CXXTemporaryObjectExpr::CreateEmpty(
+ Context,
+ /* NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CXX_STATIC_CAST:
@@ -3916,7 +3166,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_USER_DEFINED_LITERAL:
- S = new (Context) UserDefinedLiteral(Context, Empty);
+ S = UserDefinedLiteral::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_CXX_STD_INITIALIZER_LIST:
@@ -3980,7 +3231,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_NEW:
- S = new (Context) CXXNewExpr(Empty);
+ S = CXXNewExpr::CreateEmpty(
+ Context,
+ /*IsArray=*/Record[ASTStmtReader::NumExprFields],
+ /*HasInit=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*NumPlacementArgs=*/Record[ASTStmtReader::NumExprFields + 2],
+ /*IsParenTypeId=*/Record[ASTStmtReader::NumExprFields + 3]);
break;
case EXPR_CXX_DELETE:
@@ -3997,11 +3253,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
- S = CXXDependentScopeMemberExpr::CreateEmpty(Context,
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
- : 0);
+ S = CXXDependentScopeMemberExpr::CreateEmpty(
+ Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*HasFirstQualifierFoundInScope=*/
+ Record[ASTStmtReader::NumExprFields + 2]);
break;
case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
@@ -4018,19 +3275,25 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_UNRESOLVED_MEMBER:
- S = UnresolvedMemberExpr::CreateEmpty(Context,
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
- : 0);
+ S = UnresolvedMemberExpr::CreateEmpty(
+ Context,
+ /*NumResults=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*NumTemplateArgs=*/
+ Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 2]
+ : 0);
break;
case EXPR_CXX_UNRESOLVED_LOOKUP:
- S = UnresolvedLookupExpr::CreateEmpty(Context,
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
- : 0);
+ S = UnresolvedLookupExpr::CreateEmpty(
+ Context,
+ /*NumResults=*/Record[ASTStmtReader::NumExprFields],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*NumTemplateArgs=*/
+ Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 2]
+ : 0);
break;
case EXPR_TYPE_TRAIT:
@@ -4086,7 +3349,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CUDA_KERNEL_CALL:
- S = new (Context) CUDAKernelCallExpr(Context, Empty);
+ S = CUDAKernelCallExpr::CreateEmpty(
+ Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields], Empty);
break;
case EXPR_ASTYPE:
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 1a8d806e9d24..37adcb70640d 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -310,7 +310,7 @@ void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Record.push_back(T->isVariadic());
Record.push_back(T->hasTrailingReturn());
- Record.push_back(T->getTypeQuals());
+ Record.push_back(T->getTypeQuals().getAsOpaqueValue());
Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
addExceptionSpec(T, Record);
@@ -770,19 +770,7 @@ void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
}
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
- if (TL.hasAttrOperand()) {
- SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
- }
- if (TL.hasAttrExprOperand()) {
- Expr *operand = TL.getAttrExprOperand();
- Record.push_back(operand ? 1 : 0);
- if (operand) Record.AddStmt(operand);
- } else if (TL.hasAttrEnumOperand()) {
- Record.AddSourceLocation(TL.getAttrEnumOperandLoc());
- }
+ Record.AddAttr(TL.getAttr());
}
void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
@@ -1707,7 +1695,6 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Detailed record is important since it is used for the module cache hash.
Record.push_back(PPOpts.DetailedRecord);
AddString(PPOpts.ImplicitPCHInclude, Record);
- AddString(PPOpts.ImplicitPTHInclude, Record);
Record.push_back(static_cast<unsigned>(PPOpts.ObjCXXARCStandardLibrary));
Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record);
@@ -2507,8 +2494,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
MacroIdentifiers.push_back(Id.second);
// Sort the set of macro definitions that need to be serialized by the
// name of the macro, to provide a stable ordering.
- llvm::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(),
- llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(MacroIdentifiers, llvm::less_ptr<IdentifierInfo>());
// Emit the macro directives as a list and associate the offset with the
// identifier they belong to.
@@ -3242,8 +3228,7 @@ void ASTWriter::WriteFileDeclIDsMap() {
SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs(
FileDeclIDs.begin(), FileDeclIDs.end());
- llvm::sort(SortedFileDeclIDs.begin(), SortedFileDeclIDs.end(),
- llvm::less_first());
+ llvm::sort(SortedFileDeclIDs, llvm::less_first());
// Join the vectors of DeclIDs from all files.
SmallVector<DeclID, 256> FileGroupedDeclIDs;
@@ -3586,7 +3571,7 @@ class ASTIdentifierTableTrait {
II->isPoisoned() ||
(IsModule ? II->hasRevertedBuiltin() : II->getObjCOrBuiltinID()) ||
II->hasRevertedTokenIDToIdentifier() ||
- (NeedDecls && II->getFETokenInfo<void>()))
+ (NeedDecls && II->getFETokenInfo()))
return true;
return false;
@@ -3749,7 +3734,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
IIs.push_back(ID.second);
// Sort the identifiers lexicographically before getting them references so
// that their order is stable.
- llvm::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(IIs, llvm::less_ptr<IdentifierInfo>());
for (const IdentifierInfo *II : IIs)
if (Trait.isInterestingNonMacroIdentifier(II))
getIdentifierRef(II);
@@ -3960,7 +3945,8 @@ public:
bool ASTWriter::isLookupResultExternal(StoredDeclsList &Result,
DeclContext *DC) {
- return Result.hasExternalDecls() && DC->NeedToReconcileExternalVisibleStorage;
+ return Result.hasExternalDecls() &&
+ DC->hasNeedToReconcileExternalVisibleStorage();
}
bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result,
@@ -3975,8 +3961,8 @@ bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result,
void
ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
llvm::SmallVectorImpl<char> &LookupTable) {
- assert(!ConstDC->HasLazyLocalLexicalLookups &&
- !ConstDC->HasLazyExternalLexicalLookups &&
+ assert(!ConstDC->hasLazyLocalLexicalLookups() &&
+ !ConstDC->hasLazyExternalLexicalLookups() &&
"must call buildLookups first");
// FIXME: We need to build the lookups table, which is logically const.
@@ -4046,7 +4032,7 @@ ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC,
}
// Sort the names into a stable order.
- llvm::sort(Names.begin(), Names.end());
+ llvm::sort(Names);
if (auto *D = dyn_cast<CXXRecordDecl>(DC)) {
// We need to establish an ordering of constructor and conversion function
@@ -4183,7 +4169,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
std::make_pair(Entry.first, Entry.second.getLookupResult()));
}
- llvm::sort(LookupResults.begin(), LookupResults.end(), llvm::less_first());
+ llvm::sort(LookupResults, llvm::less_first());
for (auto &NameAndResult : LookupResults) {
DeclarationName Name = NameAndResult.first;
DeclContext::lookup_result Result = NameAndResult.second;
@@ -4480,16 +4466,21 @@ void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
// General Serialization Routines
//===----------------------------------------------------------------------===//
-/// Emit the list of attributes to the specified record.
-void ASTRecordWriter::AddAttributes(ArrayRef<const Attr *> Attrs) {
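+/// Emit a single attribute to the record. A null attribute is written as
+/// kind 0; otherwise the kind is stored offset by one so that 0 can act as
+/// the null marker, followed by the source range and the attribute-specific
+/// fields from AttrPCHWrite.inc.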
+void ASTRecordWriter::AddAttr(const Attr *A) {
auto &Record = *this;
- Record.push_back(Attrs.size());
- for (const auto *A : Attrs) {
- Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
- Record.AddSourceRange(A->getRange());
+ if (!A)
+ return Record.push_back(0);
+ Record.push_back(A->getKind() + 1); // FIXME: stable encoding, target attrs
+ Record.AddSourceRange(A->getRange());
#include "clang/Serialization/AttrPCHWrite.inc"
- }
+}
+
+/// Emit the list of attributes to the specified record.
+void ASTRecordWriter::AddAttributes(ArrayRef<const Attr *> Attrs) {
+ push_back(Attrs.size());
+ for (const auto *A : Attrs)
+ AddAttr(A);
}
void ASTWriter::AddToken(const Token &Tok, RecordDataImpl &Record) {
@@ -4881,7 +4872,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
IIs.push_back(II);
}
// Sort the identifiers to visit based on their name.
- llvm::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>());
+ llvm::sort(IIs, llvm::less_ptr<IdentifierInfo>());
for (const IdentifierInfo *II : IIs) {
for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
DEnd = SemaRef.IdResolver.end();
@@ -5022,13 +5013,16 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
WriteOpenCLExtensionTypes(SemaRef);
- WriteOpenCLExtensionDecls(SemaRef);
WriteCUDAPragmas(SemaRef);
// If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
+  // We need to have information about submodules to correctly deserialize
+  // decls from the OpenCLExtensionDecls block.
+ WriteOpenCLExtensionDecls(SemaRef);
+
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
// Write the record containing external, unnamed definitions.
@@ -5118,7 +5112,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
};
// Sort and deduplicate module IDs.
- llvm::sort(Imports.begin(), Imports.end(), Cmp);
+ llvm::sort(Imports, Cmp);
Imports.erase(std::unique(Imports.begin(), Imports.end(), Eq),
Imports.end());
@@ -5254,7 +5248,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
}
Record.push_back(RD->getTagKind());
Record.AddSourceLocation(RD->getLocation());
- Record.AddSourceLocation(RD->getLocStart());
+ Record.AddSourceLocation(RD->getBeginLoc());
Record.AddSourceRange(RD->getBraceRange());
// Instantiation may change attributes; write them all out afresh.
@@ -5295,6 +5289,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
break;
case UPD_DECL_MARKED_OPENMP_DECLARETARGET:
+ Record.push_back(D->getAttr<OMPDeclareTargetDeclAttr>()->getMapType());
Record.AddSourceRange(
D->getAttr<OMPDeclareTargetDeclAttr>()->getRange());
break;
@@ -6472,3 +6467,489 @@ void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
DeclsToEmitEvenIfUnreferenced.push_back(D);
}
+
+//===----------------------------------------------------------------------===//
+// OMPClause Serialization
+//===----------------------------------------------------------------------===//
+
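+// Each clause is written as its clause kind, the kind-specific payload
+// emitted by the matching Visit* method, and finally its begin and end
+// source locations.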
+void OMPClauseWriter::writeClause(OMPClause *C) {
+ Record.push_back(C->getClauseKind());
+ Visit(C);
+ Record.AddSourceLocation(C->getBeginLoc());
+ Record.AddSourceLocation(C->getEndLoc());
+}
+
+void OMPClauseWriter::VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C) {
+ Record.push_back(C->getCaptureRegion());
+ Record.AddStmt(C->getPreInitStmt());
+}
+
+void OMPClauseWriter::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getPostUpdateExpr());
+}
+
+void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.push_back(C->getNameModifier());
+ Record.AddSourceLocation(C->getNameModifierLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.AddStmt(C->getCondition());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPFinalClause(OMPFinalClause *C) {
+ Record.AddStmt(C->getCondition());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getNumThreads());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPSafelenClause(OMPSafelenClause *C) {
+ Record.AddStmt(C->getSafelen());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
+ Record.AddStmt(C->getSimdlen());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
+ Record.AddStmt(C->getNumForLoops());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
+ Record.push_back(C->getDefaultKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getDefaultKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) {
+ Record.push_back(C->getProcBindKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getProcBindKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.push_back(C->getScheduleKind());
+ Record.push_back(C->getFirstScheduleModifier());
+ Record.push_back(C->getSecondScheduleModifier());
+ Record.AddStmt(C->getChunkSize());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getFirstScheduleModifierLoc());
+ Record.AddSourceLocation(C->getSecondScheduleModifierLoc());
+ Record.AddSourceLocation(C->getScheduleKindLoc());
+ Record.AddSourceLocation(C->getCommaLoc());
+}
+
+void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *C) {
+ Record.push_back(C->getLoopNumIterations().size());
+ Record.AddStmt(C->getNumForLoops());
+ for (Expr *NumIter : C->getLoopNumIterations())
+ Record.AddStmt(NumIter);
+  for (unsigned I = 0, E = C->getLoopNumIterations().size(); I < E; ++I)
+ Record.AddStmt(C->getLoopCounter(I));
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPNowaitClause(OMPNowaitClause *) {}
+
+void OMPClauseWriter::VisitOMPUntiedClause(OMPUntiedClause *) {}
+
+void OMPClauseWriter::VisitOMPMergeableClause(OMPMergeableClause *) {}
+
+void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
+
+void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
+
+void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
+
+void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
+
+void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
+
+void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
+
+void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
+
+void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
+
+void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
+ Record.AddStmt(VE);
+ }
+}
+
+void OMPClauseWriter::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPreInit(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->private_copies()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->inits()) {
+ Record.AddStmt(VE);
+ }
+}
+
+void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPostUpdate(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *E : C->private_copies())
+ Record.AddStmt(E);
+ for (auto *E : C->source_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPostUpdate(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getNameInfo());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *VE : C->privates())
+ Record.AddStmt(VE);
+ for (auto *E : C->lhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->rhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->reduction_ops())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPostUpdate(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getNameInfo());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *VE : C->privates())
+ Record.AddStmt(VE);
+ for (auto *E : C->lhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->rhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->reduction_ops())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPInReductionClause(OMPInReductionClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPostUpdate(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
+ Record.AddDeclarationNameInfo(C->getNameInfo());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *VE : C->privates())
+ Record.AddStmt(VE);
+ for (auto *E : C->lhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->rhs_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->reduction_ops())
+ Record.AddStmt(E);
+ for (auto *E : C->taskgroup_descriptors())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPLinearClause(OMPLinearClause *C) {
+ Record.push_back(C->varlist_size());
+ VisitOMPClauseWithPostUpdate(C);
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ Record.push_back(C->getModifier());
+ Record.AddSourceLocation(C->getModifierLoc());
+ for (auto *VE : C->varlists()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->privates()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->inits()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->updates()) {
+ Record.AddStmt(VE);
+ }
+ for (auto *VE : C->finals()) {
+ Record.AddStmt(VE);
+ }
+ Record.AddStmt(C->getStep());
+ Record.AddStmt(C->getCalcStep());
+}
+
+void OMPClauseWriter::VisitOMPAlignedClause(OMPAlignedClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ Record.AddStmt(C->getAlignment());
+}
+
+void OMPClauseWriter::VisitOMPCopyinClause(OMPCopyinClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *E : C->source_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (auto *E : C->source_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->destination_exprs())
+ Record.AddStmt(E);
+ for (auto *E : C->assignment_ops())
+ Record.AddStmt(E);
+}
+
+void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+}
+
+void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getNumLoops());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.push_back(C->getDependencyKind());
+ Record.AddSourceLocation(C->getDependencyLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ Record.AddStmt(C->getLoopData(I));
+}
+
+void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getDevice());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
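+// The sizes of the trailing lists are written first, followed by the
+// map-type modifiers, the map type, and the variable, declaration, and
+// component lists.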
+void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (unsigned I = 0; I < OMPMapClause::NumberOfModifiers; ++I) {
+ Record.push_back(C->getMapTypeModifier(I));
+ Record.AddSourceLocation(C->getMapTypeModifierLoc(I));
+ }
+ Record.push_back(C->getMapType());
+ Record.AddSourceLocation(C->getMapLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
+void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getNumTeams());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getThreadLimit());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPPriorityClause(OMPPriorityClause *C) {
+ Record.AddStmt(C->getPriority());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
+ Record.AddStmt(C->getGrainsize());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
+ Record.AddStmt(C->getNumTasks());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPHintClause(OMPHintClause *C) {
+ Record.AddStmt(C->getHint());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPDistScheduleClause(OMPDistScheduleClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.push_back(C->getDistScheduleKind());
+ Record.AddStmt(C->getChunkSize());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getDistScheduleKindLoc());
+ Record.AddSourceLocation(C->getCommaLoc());
+}
+
+void OMPClauseWriter::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
+ Record.push_back(C->getDefaultmapKind());
+ Record.push_back(C->getDefaultmapModifier());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getDefaultmapModifierLoc());
+ Record.AddSourceLocation(C->getDefaultmapKindLoc());
+}
+
+void OMPClauseWriter::VisitOMPToClause(OMPToClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
+void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
+void OMPClauseWriter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *VE : C->private_copies())
+ Record.AddStmt(VE);
+ for (auto *VE : C->inits())
+ Record.AddStmt(VE);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
+void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
+void OMPClauseWriter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
+
+void OMPClauseWriter::VisitOMPUnifiedSharedMemoryClause(
+ OMPUnifiedSharedMemoryClause *) {}
+
+void OMPClauseWriter::VisitOMPReverseOffloadClause(OMPReverseOffloadClause *) {}
+
+void
+OMPClauseWriter::VisitOMPDynamicAllocatorsClause(OMPDynamicAllocatorsClause *) {
+}
+
+void OMPClauseWriter::VisitOMPAtomicDefaultMemOrderClause(
+ OMPAtomicDefaultMemOrderClause *C) {
+ Record.push_back(C->getAtomicDefaultMemOrderKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getAtomicDefaultMemOrderKindKwLoc());
+}
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 7286f2cac178..002b43f81121 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Serialization/ASTReader.h"
@@ -144,6 +145,7 @@ namespace clang {
void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
+ void VisitOMPRequiresDecl(OMPRequiresDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
@@ -328,7 +330,7 @@ void ASTDeclWriter::VisitPragmaCommentDecl(PragmaCommentDecl *D) {
StringRef Arg = D->getArg();
Record.push_back(Arg.size());
VisitDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Record.push_back(D->getCommentKind());
Record.AddString(Arg);
Code = serialization::DECL_PRAGMA_COMMENT;
@@ -340,7 +342,7 @@ void ASTDeclWriter::VisitPragmaDetectMismatchDecl(
StringRef Value = D->getValue();
Record.push_back(Name.size() + 1 + Value.size());
VisitDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Record.AddString(Name);
Record.AddString(Value);
Code = serialization::DECL_PRAGMA_DETECT_MISMATCH;
@@ -360,7 +362,7 @@ void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
VisitNamedDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Record.AddTypeRef(QualType(D->getTypeForDecl(), 0));
}
@@ -529,28 +531,27 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
// FunctionDecl's body is handled last at ASTWriterDecl::Visit,
// after everything else is written.
-
- Record.push_back((int)D->SClass); // FIXME: stable encoding
- Record.push_back(D->IsInline);
- Record.push_back(D->IsInlineSpecified);
- Record.push_back(D->IsExplicitSpecified);
- Record.push_back(D->IsVirtualAsWritten);
- Record.push_back(D->IsPure);
- Record.push_back(D->HasInheritedPrototype);
- Record.push_back(D->HasWrittenPrototype);
- Record.push_back(D->IsDeleted);
- Record.push_back(D->IsTrivial);
- Record.push_back(D->IsTrivialForCall);
- Record.push_back(D->IsDefaulted);
- Record.push_back(D->IsExplicitlyDefaulted);
- Record.push_back(D->HasImplicitReturnZero);
- Record.push_back(D->IsConstexpr);
- Record.push_back(D->UsesSEHTry);
- Record.push_back(D->HasSkippedBody);
- Record.push_back(D->IsMultiVersion);
- Record.push_back(D->IsLateTemplateParsed);
+ Record.push_back(static_cast<int>(D->getStorageClass())); // FIXME: stable encoding
+ Record.push_back(D->isInlineSpecified());
+ Record.push_back(D->isInlined());
+ Record.push_back(D->isExplicitSpecified());
+ Record.push_back(D->isVirtualAsWritten());
+ Record.push_back(D->isPure());
+ Record.push_back(D->hasInheritedPrototype());
+ Record.push_back(D->hasWrittenPrototype());
+ Record.push_back(D->isDeletedBit());
+ Record.push_back(D->isTrivial());
+ Record.push_back(D->isTrivialForCall());
+ Record.push_back(D->isDefaulted());
+ Record.push_back(D->isExplicitlyDefaulted());
+ Record.push_back(D->hasImplicitReturnZero());
+ Record.push_back(D->isConstexpr());
+ Record.push_back(D->usesSEHTry());
+ Record.push_back(D->hasSkippedBody());
+ Record.push_back(D->isMultiVersion());
+ Record.push_back(D->isLateTemplateParsed());
Record.push_back(D->getLinkageInternal());
- Record.AddSourceLocation(D->getLocEnd());
+ Record.AddSourceLocation(D->getEndLoc());
Record.push_back(D->getODRHash());
@@ -628,7 +629,7 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
void ASTDeclWriter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
VisitFunctionDecl(D);
- Record.push_back(D->IsCopyDeductionCandidate);
+ Record.push_back(D->isCopyDeductionCandidate());
Code = serialization::DECL_CXX_DEDUCTION_GUIDE;
}
@@ -648,12 +649,12 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Record.push_back(D->isVariadic());
Record.push_back(D->isPropertyAccessor());
Record.push_back(D->isDefined());
- Record.push_back(D->IsOverriding);
- Record.push_back(D->HasSkippedBody);
+ Record.push_back(D->isOverriding());
+ Record.push_back(D->hasSkippedBody());
- Record.push_back(D->IsRedeclaration);
- Record.push_back(D->HasRedeclaration);
- if (D->HasRedeclaration) {
+ Record.push_back(D->isRedeclaration());
+ Record.push_back(D->hasRedeclaration());
+ if (D->hasRedeclaration()) {
assert(Context.getObjCMethodRedeclaration(D));
Record.AddDeclRef(Context.getObjCMethodRedeclaration(D));
}
@@ -665,12 +666,12 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Record.push_back(D->hasRelatedResultType());
Record.AddTypeRef(D->getReturnType());
Record.AddTypeSourceInfo(D->getReturnTypeSourceInfo());
- Record.AddSourceLocation(D->getLocEnd());
+ Record.AddSourceLocation(D->getEndLoc());
Record.push_back(D->param_size());
for (const auto *P : D->parameters())
Record.AddDeclRef(P);
- Record.push_back(D->SelLocsKind);
+ Record.push_back(D->getSelLocsKind());
unsigned NumStoredSelLocs = D->getNumStoredSelLocs();
SourceLocation *SelLocs = D->getStoredSelLocs();
Record.push_back(NumStoredSelLocs);
@@ -854,7 +855,7 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
VisitDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Record.AddDeclRef(D->getPropertyDecl());
Record.AddDeclRef(D->getPropertyIvarDecl());
Record.AddSourceLocation(D->getPropertyIvarDeclLoc());
@@ -921,13 +922,13 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->getStorageClass());
Record.push_back(D->getTSCSpec());
Record.push_back(D->getInitStyle());
+ Record.push_back(D->isARCPseudoStrong());
if (!isa<ParmVarDecl>(D)) {
Record.push_back(D->isThisDeclarationADemotedDefinition());
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
Record.push_back(D->isObjCForDecl());
- Record.push_back(D->isARCPseudoStrong());
Record.push_back(D->isInline());
Record.push_back(D->isInlineSpecified());
Record.push_back(D->isConstexpr());
@@ -937,6 +938,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(static_cast<unsigned>(IPD->getParameterKind()));
else
Record.push_back(0);
+ Record.push_back(D->isEscapingByref());
}
Record.push_back(D->getLinkageInternal());
@@ -947,6 +949,13 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(0);
}
+ if (D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) {
+ ASTContext::BlockVarCopyInit Init = Writer.Context->getBlockVarCopyInit(D);
+ Record.AddStmt(Init.getCopyExpr());
+ if (Init.getCopyExpr())
+ Record.push_back(Init.canThrow());
+ }
+
if (D->getStorageDuration() == SD_Static) {
bool ModulesCodegen = false;
if (Writer.WritingModule &&
@@ -999,6 +1008,8 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
!D->isConstexpr() &&
!D->isInitCapture() &&
!D->isPreviousDeclInSameBlockScope() &&
+ !(D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) &&
+ !D->isEscapingByref() &&
D->getStorageDuration() != SD_Static &&
!D->getMemberSpecializationInfo())
AbbrevToUse = Writer.getDeclVarAbbrev();
@@ -1098,6 +1109,7 @@ void ASTDeclWriter::VisitBlockDecl(BlockDecl *D) {
Record.push_back(D->isVariadic());
Record.push_back(D->blockMissingReturnType());
Record.push_back(D->isConversionFromLambda());
+ Record.push_back(D->doesNotEscape());
Record.push_back(D->capturesCXXThis());
Record.push_back(D->getNumCaptures());
for (const auto &capture : D->captures()) {
@@ -1142,7 +1154,7 @@ void ASTDeclWriter::VisitExportDecl(ExportDecl *D) {
void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
VisitNamedDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Code = serialization::DECL_LABEL;
}
@@ -1151,7 +1163,7 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
VisitRedeclarable(D);
VisitNamedDecl(D);
Record.push_back(D->isInline());
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
Record.AddSourceLocation(D->getRBraceLoc());
if (D->isOriginalNamespace())
@@ -1275,7 +1287,7 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
// Store (what we currently believe to be) the key function to avoid
// deserializing every method so we can compute it.
- if (D->IsCompleteDefinition)
+ if (D->isCompleteDefinition())
Record.AddDeclRef(Context.getCurrentKeyFunction(D));
Code = serialization::DECL_CXX_RECORD;
@@ -1343,7 +1355,7 @@ void ASTDeclWriter::VisitImportDecl(ImportDecl *D) {
ArrayRef<SourceLocation> IdentifierLocs = D->getIdentifierLocs();
Record.push_back(!IdentifierLocs.empty());
if (IdentifierLocs.empty()) {
- Record.AddSourceLocation(D->getLocEnd());
+ Record.AddSourceLocation(D->getEndLoc());
Record.push_back(1);
} else {
for (unsigned I = 0, N = IdentifierLocs.size(); I != N; ++I)
@@ -1731,10 +1743,23 @@ void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
Code = serialization::DECL_OMP_THREADPRIVATE;
}
+void ASTDeclWriter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
+ Record.push_back(D->clauselist_size());
+ VisitDecl(D);
+ OMPClauseWriter ClauseWriter(Record);
+ for (OMPClause *C : D->clauselists())
+ ClauseWriter.writeClause(C);
+ Code = serialization::DECL_OMP_REQUIRES;
+}
+
void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
VisitValueDecl(D);
- Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getBeginLoc());
+ Record.AddStmt(D->getCombinerIn());
+ Record.AddStmt(D->getCombinerOut());
Record.AddStmt(D->getCombiner());
+ Record.AddStmt(D->getInitOrig());
+ Record.AddStmt(D->getInitPriv());
Record.AddStmt(D->getInitializer());
Record.push_back(D->getInitializerKind());
Record.AddDeclRef(D->getPrevDeclInScope());
@@ -1961,6 +1986,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // SClass
Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
+ Abv->Add(BitCodeAbbrevOp(0)); // ARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // Linkage
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
@@ -2037,18 +2063,19 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // SClass
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // TSCSpec
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // InitStyle
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsThisDeclarationADemotedDefinition
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isObjCForDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // isInline
Abv->Add(BitCodeAbbrevOp(0)); // isInlineSpecified
Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr
Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture
Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
Abv->Add(BitCodeAbbrevOp(0)); // ImplicitParamKind
+ Abv->Add(BitCodeAbbrevOp(0)); // EscapingByref
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // IsInitICE (local)
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
@@ -2229,8 +2256,7 @@ static bool isRequiredDecl(const Decl *D, ASTContext &Context,
// File scoped assembly or obj-c or OMP declare target implementation must be
// seen.
- if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D) ||
- D->hasAttr<OMPDeclareTargetDeclAttr>())
+ if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
return true;
if (WritingModule && (isa<VarDecl>(D) || isa<ImportDecl>(D))) {
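
The VarDecl changes above touch two places that must stay in sync: the field order emitted in VisitVarDecl and the fixed-width bit layout declared in WriteDeclAbbrevs (isARCPseudoStrong moves earlier and EscapingByref is added in both). A minimal sketch of that invariant in plain standard C++ rather than LLVM's BitstreamWriter; BitWriter and BitReader are illustrative names, not the real API.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative bit-level writer/reader; the field order and widths are the schema.
struct BitWriter {
  std::vector<uint8_t> Bits; // one entry per bit, for clarity
  void emit(uint64_t Value, unsigned Width) {
    for (unsigned I = 0; I < Width; ++I)
      Bits.push_back((Value >> I) & 1);
  }
};

struct BitReader {
  const std::vector<uint8_t> &Bits;
  unsigned Pos;
  uint64_t read(unsigned Width) {
    uint64_t V = 0;
    for (unsigned I = 0; I < Width; ++I)
      V |= uint64_t(Bits[Pos++]) << I;
    return V;
  }
};

int main() {
  // Hypothetical VarDecl-like fields; writer and abbreviation must agree on the order.
  BitWriter W;
  W.emit(/*StorageClass=*/2, 3);
  W.emit(/*ARCPseudoStrong=*/1, 1); // now emitted before the ParmVarDecl-only block
  W.emit(/*EscapingByref=*/0, 1);

  BitReader R{W.Bits, 0};
  assert(R.read(3) == 2); // the reader consumes fields in the same order and width
  assert(R.read(1) == 1);
  assert(R.read(1) == 0);
  return 0;
}

Reordering a field on only one side silently shifts every later bit, which is why the hunks in VisitVarDecl and WriteDeclAbbrevs move in lockstep.
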
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 48c3f79a4380..6f8b86edcdfc 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -73,7 +73,7 @@ void ASTStmtWriter::VisitStmt(Stmt *S) {
void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
Record.AddSourceLocation(S->getSemiLoc());
- Record.push_back(S->HasLeadingEmptyMacro);
+ Record.push_back(S->NullStmtBits.HasLeadingEmptyMacro);
Code = serialization::STMT_NULL;
}
@@ -96,10 +96,13 @@ void ASTStmtWriter::VisitSwitchCase(SwitchCase *S) {
void ASTStmtWriter::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
+ Record.push_back(S->caseStmtIsGNURange());
Record.AddStmt(S->getLHS());
- Record.AddStmt(S->getRHS());
Record.AddStmt(S->getSubStmt());
- Record.AddSourceLocation(S->getEllipsisLoc());
+ if (S->caseStmtIsGNURange()) {
+ Record.AddStmt(S->getRHS());
+ Record.AddSourceLocation(S->getEllipsisLoc());
+ }
Code = serialization::STMT_CASE;
}
@@ -128,25 +131,50 @@ void ASTStmtWriter::VisitAttributedStmt(AttributedStmt *S) {
void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
+
+ bool HasElse = S->getElse() != nullptr;
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ bool HasInit = S->getInit() != nullptr;
+
Record.push_back(S->isConstexpr());
- Record.AddStmt(S->getInit());
- Record.AddDeclRef(S->getConditionVariable());
+ Record.push_back(HasElse);
+ Record.push_back(HasVar);
+ Record.push_back(HasInit);
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getThen());
- Record.AddStmt(S->getElse());
+ if (HasElse)
+ Record.AddStmt(S->getElse());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+ if (HasInit)
+ Record.AddStmt(S->getInit());
+
Record.AddSourceLocation(S->getIfLoc());
- Record.AddSourceLocation(S->getElseLoc());
+ if (HasElse)
+ Record.AddSourceLocation(S->getElseLoc());
+
Code = serialization::STMT_IF;
}
void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- Record.AddStmt(S->getInit());
- Record.AddDeclRef(S->getConditionVariable());
+
+ bool HasInit = S->getInit() != nullptr;
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ Record.push_back(HasInit);
+ Record.push_back(HasVar);
+ Record.push_back(S->isAllEnumCasesCovered());
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
+ if (HasInit)
+ Record.AddStmt(S->getInit());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+
Record.AddSourceLocation(S->getSwitchLoc());
- Record.push_back(S->isAllEnumCasesCovered());
+
for (SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase())
Record.push_back(Writer.RecordSwitchCaseID(SC));
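
The IfStmt and SwitchStmt hunks above switch to writing presence flags (HasElse, HasVar, HasInit) before any optional child, so the reader knows up front which trailing objects exist. A self-contained sketch of the same write/read discipline, assuming a plain vector of integers in place of the AST record (IfRecord is a stand-in, not a Clang type).

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct IfRecord {
  uint64_t Cond, Then;
  std::optional<uint64_t> Else, CondVar, Init;
};

// Writer: presence flags first, mandatory children next, optional children last.
static void write(const IfRecord &S, std::vector<uint64_t> &Rec) {
  Rec.push_back(S.Else.has_value());
  Rec.push_back(S.CondVar.has_value());
  Rec.push_back(S.Init.has_value());
  Rec.push_back(S.Cond);
  Rec.push_back(S.Then);
  if (S.Else)    Rec.push_back(*S.Else);
  if (S.CondVar) Rec.push_back(*S.CondVar);
  if (S.Init)    Rec.push_back(*S.Init);
}

// Reader: the flags tell it exactly which optional entries follow.
static IfRecord read(const std::vector<uint64_t> &Rec) {
  unsigned I = 0;
  bool HasElse = Rec[I++], HasVar = Rec[I++], HasInit = Rec[I++];
  IfRecord S;
  S.Cond = Rec[I++];
  S.Then = Rec[I++];
  if (HasElse) S.Else = Rec[I++];
  if (HasVar)  S.CondVar = Rec[I++];
  if (HasInit) S.Init = Rec[I++];
  return S;
}

int main() {
  std::vector<uint64_t> Rec;
  write({/*Cond=*/10, /*Then=*/20, std::nullopt, std::nullopt, /*Init=*/30}, Rec);
  IfRecord S = read(Rec);
  assert(S.Cond == 10 && S.Then == 20 && !S.Else && !S.CondVar && *S.Init == 30);
  return 0;
}

Reading the flags before the children is what lets a deserializer size its trailing storage before touching any sub-statement.
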
@@ -155,9 +183,15 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- Record.AddDeclRef(S->getConditionVariable());
+
+ bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
+ Record.push_back(HasVar);
+
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
+ if (HasVar)
+ Record.AddDeclRef(S->getConditionVariable());
+
Record.AddSourceLocation(S->getWhileLoc());
Code = serialization::STMT_WHILE;
}
@@ -215,15 +249,21 @@ void ASTStmtWriter::VisitBreakStmt(BreakStmt *S) {
void ASTStmtWriter::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
+
+ bool HasNRVOCandidate = S->getNRVOCandidate() != nullptr;
+ Record.push_back(HasNRVOCandidate);
+
Record.AddStmt(S->getRetValue());
+ if (HasNRVOCandidate)
+ Record.AddDeclRef(S->getNRVOCandidate());
+
Record.AddSourceLocation(S->getReturnLoc());
- Record.AddDeclRef(S->getNRVOCandidate());
Code = serialization::STMT_RETURN;
}
void ASTStmtWriter::VisitDeclStmt(DeclStmt *S) {
VisitStmt(S);
- Record.AddSourceLocation(S->getStartLoc());
+ Record.AddSourceLocation(S->getBeginLoc());
Record.AddSourceLocation(S->getEndLoc());
DeclGroupRef DG = S->getDeclGroup();
for (DeclGroupRef::iterator D = DG.begin(), DEnd = DG.end(); D != DEnd; ++D)
@@ -386,11 +426,21 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Record.push_back(E->getObjectKind());
}
+void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
+ VisitExpr(E);
+ Record.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_CONSTANT;
+}
+
void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
+
+ bool HasFunctionName = E->getFunctionName() != nullptr;
+ Record.push_back(HasFunctionName);
+ Record.push_back(E->getIdentKind()); // FIXME: stable encoding
Record.AddSourceLocation(E->getLocation());
- Record.push_back(E->getIdentType()); // FIXME: stable encoding
- Record.AddStmt(E->getFunctionName());
+ if (HasFunctionName)
+ Record.AddStmt(E->getFunctionName());
Code = serialization::EXPR_PREDEFINED;
}
@@ -468,17 +518,23 @@ void ASTStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
- Record.push_back(E->getByteLength());
+
+ // Store the various bits of data of StringLiteral.
Record.push_back(E->getNumConcatenated());
+ Record.push_back(E->getLength());
+ Record.push_back(E->getCharByteWidth());
Record.push_back(E->getKind());
Record.push_back(E->isPascal());
- // FIXME: String data should be stored as a blob at the end of the
- // StringLiteral. However, we can't do so now because we have no
- // provision for coping with abbreviations when we're jumping around
- // the AST file during deserialization.
- Record.append(E->getBytes().begin(), E->getBytes().end());
+
+ // Store the trailing array of SourceLocation.
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
Record.AddSourceLocation(E->getStrTokenLoc(I));
+
+ // Store the trailing array of char holding the string data.
+ StringRef StrData = E->getBytes();
+ for (unsigned I = 0, N = E->getByteLength(); I != N; ++I)
+ Record.push_back(StrData[I]);
+
Code = serialization::EXPR_STRING_LITERAL;
}
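
The StringLiteral hunk above now stores the element count and character byte width up front, followed by the token locations and the raw bytes as trailing data. A rough round-trip sketch of that layout, assuming one byte per record entry and standard-library types only (this is not the Clang record format).

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

int main() {
  // Illustrative wide literal: 2 elements, 2 bytes each.
  std::u16string Str = u"hi";
  unsigned Length = Str.size();
  unsigned CharByteWidth = sizeof(char16_t);

  std::vector<uint64_t> Rec;
  Rec.push_back(Length);
  Rec.push_back(CharByteWidth);
  const auto *Bytes = reinterpret_cast<const unsigned char *>(Str.data());
  for (unsigned I = 0, N = Length * CharByteWidth; I != N; ++I)
    Rec.push_back(Bytes[I]); // one byte per record entry

  // Reader side: the sizes come first, so the byte count is known in advance.
  unsigned Pos = 0;
  unsigned RLength = Rec[Pos++], RWidth = Rec[Pos++];
  std::vector<unsigned char> Raw;
  for (unsigned I = 0, N = RLength * RWidth; I != N; ++I)
    Raw.push_back(static_cast<unsigned char>(Rec[Pos++]));

  std::u16string Out(RLength, u'\0');
  std::memcpy(&Out[0], Raw.data(), Raw.size());
  assert(Out == Str);
  return 0;
}
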
@@ -503,11 +559,11 @@ void ASTStmtWriter::VisitParenExpr(ParenExpr *E) {
void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
VisitExpr(E);
- Record.push_back(E->NumExprs);
- for (unsigned i=0; i != E->NumExprs; ++i)
- Record.AddStmt(E->Exprs[i]);
- Record.AddSourceLocation(E->LParenLoc);
- Record.AddSourceLocation(E->RParenLoc);
+ Record.push_back(E->getNumExprs());
+ for (auto *SubStmt : E->exprs())
+ Record.AddStmt(SubStmt);
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
Code = serialization::EXPR_PAREN_LIST;
}
@@ -595,6 +651,7 @@ void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
Record.AddStmt(*Arg);
+ Record.push_back(static_cast<unsigned>(E->getADLCallKind()));
Code = serialization::EXPR_CALL;
}
@@ -605,8 +662,8 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
if (E->hasQualifier())
Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
- Record.push_back(E->HasTemplateKWAndArgsInfo);
- if (E->HasTemplateKWAndArgsInfo) {
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ if (E->hasTemplateKWAndArgsInfo()) {
Record.AddSourceLocation(E->getTemplateKeywordLoc());
unsigned NumTemplateArgs = E->getNumTemplateArgs();
Record.push_back(NumTemplateArgs);
@@ -1222,6 +1279,7 @@ void ASTStmtWriter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
Record.AddSourceLocation(S->getCoawaitLoc());
Record.AddSourceLocation(S->getColonLoc());
Record.AddSourceLocation(S->getRParenLoc());
+ Record.AddStmt(S->getInit());
Record.AddStmt(S->getRangeStmt());
Record.AddStmt(S->getBeginStmt());
Record.AddStmt(S->getEndStmt());
@@ -1245,8 +1303,8 @@ void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
- Record.AddSourceRange(E->Range);
Record.push_back(E->getFPFeatures().getInt());
+ Record.AddSourceRange(E->Range);
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
@@ -1257,18 +1315,21 @@ void ASTStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
VisitExpr(E);
+
Record.push_back(E->getNumArgs());
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- Record.AddStmt(E->getArg(I));
- Record.AddDeclRef(E->getConstructor());
- Record.AddSourceLocation(E->getLocation());
Record.push_back(E->isElidable());
Record.push_back(E->hadMultipleCandidates());
Record.push_back(E->isListInitialization());
Record.push_back(E->isStdInitListInitialization());
Record.push_back(E->requiresZeroInitialization());
Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Record.AddSourceLocation(E->getLocation());
+ Record.AddDeclRef(E->getConstructor());
Record.AddSourceRange(E->getParenOrBraceRange());
+
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Record.AddStmt(E->getArg(I));
+
Code = serialization::EXPR_CXX_CONSTRUCT;
}
@@ -1422,20 +1483,27 @@ void ASTStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
- Record.push_back(E->isGlobalNew());
+
Record.push_back(E->isArray());
+ Record.push_back(E->hasInitializer());
+ Record.push_back(E->getNumPlacementArgs());
+ Record.push_back(E->isParenTypeId());
+
+ Record.push_back(E->isGlobalNew());
Record.push_back(E->passAlignment());
Record.push_back(E->doesUsualArrayDeleteWantSize());
- Record.push_back(E->getNumPlacementArgs());
- Record.push_back(E->StoredInitializationStyle);
+ Record.push_back(E->CXXNewExprBits.StoredInitializationStyle);
+
Record.AddDeclRef(E->getOperatorNew());
Record.AddDeclRef(E->getOperatorDelete());
Record.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo());
- Record.AddSourceRange(E->getTypeIdParens());
+ if (E->isParenTypeId())
+ Record.AddSourceRange(E->getTypeIdParens());
Record.AddSourceRange(E->getSourceRange());
Record.AddSourceRange(E->getDirectInitRange());
- for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
- I != e; ++I)
+
+ for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), N = E->raw_arg_end();
+ I != N; ++I)
Record.AddStmt(*I);
Code = serialization::EXPR_CXX_NEW;
@@ -1449,7 +1517,7 @@ void ASTStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.AddDeclRef(E->getOperatorDelete());
Record.AddStmt(E->getArgument());
- Record.AddSourceLocation(E->getSourceRange().getBegin());
+ Record.AddSourceLocation(E->getBeginLoc());
Code = serialization::EXPR_CXX_DELETE;
}
@@ -1486,31 +1554,36 @@ void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
Code = serialization::EXPR_EXPR_WITH_CLEANUPS;
}
-void
-ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+void ASTStmtWriter::VisitCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *E) {
VisitExpr(E);
- // Don't emit anything here, HasTemplateKWAndArgsInfo must be
- // emitted first.
+ // Don't emit anything here (or, if you do, you will have to update
+ // Don't emit anything here (or, if you do, you will have to update
+ // the corresponding deserialization function).
- Record.push_back(E->HasTemplateKWAndArgsInfo);
- if (E->HasTemplateKWAndArgsInfo) {
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ Record.push_back(E->getNumTemplateArgs());
+ Record.push_back(E->hasFirstQualifierFoundInScope());
+
+ if (E->hasTemplateKWAndArgsInfo()) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
- Record.push_back(ArgInfo.NumTemplateArgs);
AddTemplateKWAndArgsInfo(ArgInfo,
E->getTrailingObjects<TemplateArgumentLoc>());
}
+ Record.push_back(E->isArrow());
+ Record.AddSourceLocation(E->getOperatorLoc());
+ Record.AddTypeRef(E->getBaseType());
+ Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
if (!E->isImplicitAccess())
Record.AddStmt(E->getBase());
else
Record.AddStmt(nullptr);
- Record.AddTypeRef(E->getBaseType());
- Record.push_back(E->isArrow());
- Record.AddSourceLocation(E->getOperatorLoc());
- Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
- Record.AddDeclRef(E->getFirstQualifierFoundInScope());
+
+ if (E->hasFirstQualifierFoundInScope())
+ Record.AddDeclRef(E->getFirstQualifierFoundInScope());
+
Record.AddDeclarationNameInfo(E->MemberNameInfo);
Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_MEMBER;
}
@@ -1522,8 +1595,8 @@ ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
// Don't emit anything here, HasTemplateKWAndArgsInfo must be
// emitted first.
- Record.push_back(E->HasTemplateKWAndArgsInfo);
- if (E->HasTemplateKWAndArgsInfo) {
+ Record.push_back(E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo);
+ if (E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
Record.push_back(ArgInfo.NumTemplateArgs);
@@ -1552,25 +1625,23 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
- // Don't emit anything here, HasTemplateKWAndArgsInfo must be
- // emitted first.
-
- Record.push_back(E->HasTemplateKWAndArgsInfo);
- if (E->HasTemplateKWAndArgsInfo) {
+ Record.push_back(E->getNumDecls());
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ if (E->hasTemplateKWAndArgsInfo()) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
*E->getTrailingASTTemplateKWAndArgsInfo();
Record.push_back(ArgInfo.NumTemplateArgs);
AddTemplateKWAndArgsInfo(ArgInfo, E->getTrailingTemplateArgumentLoc());
}
- Record.push_back(E->getNumDecls());
- for (OverloadExpr::decls_iterator
- OvI = E->decls_begin(), OvE = E->decls_end(); OvI != OvE; ++OvI) {
+ for (OverloadExpr::decls_iterator OvI = E->decls_begin(),
+ OvE = E->decls_end();
+ OvI != OvE; ++OvI) {
Record.AddDeclRef(OvI.getDecl());
Record.push_back(OvI.getAccess());
}
- Record.AddDeclarationNameInfo(E->NameInfo);
+ Record.AddDeclarationNameInfo(E->getNameInfo());
Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
}
@@ -1803,483 +1874,11 @@ void ASTStmtWriter::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
}
//===----------------------------------------------------------------------===//
-// OpenMP Clauses.
-//===----------------------------------------------------------------------===//
-
-namespace clang {
-class OMPClauseWriter : public OMPClauseVisitor<OMPClauseWriter> {
- ASTRecordWriter &Record;
-public:
- OMPClauseWriter(ASTRecordWriter &Record) : Record(Record) {}
-#define OPENMP_CLAUSE(Name, Class) \
- void Visit##Class(Class *S);
-#include "clang/Basic/OpenMPKinds.def"
- void writeClause(OMPClause *C);
- void VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C);
- void VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C);
-};
-}
-
-void OMPClauseWriter::writeClause(OMPClause *C) {
- Record.push_back(C->getClauseKind());
- Visit(C);
- Record.AddSourceLocation(C->getLocStart());
- Record.AddSourceLocation(C->getLocEnd());
-}
-
-void OMPClauseWriter::VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C) {
- Record.push_back(C->getCaptureRegion());
- Record.AddStmt(C->getPreInitStmt());
-}
-
-void OMPClauseWriter::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
- VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getPostUpdateExpr());
-}
-
-void OMPClauseWriter::VisitOMPIfClause(OMPIfClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.push_back(C->getNameModifier());
- Record.AddSourceLocation(C->getNameModifierLoc());
- Record.AddSourceLocation(C->getColonLoc());
- Record.AddStmt(C->getCondition());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPFinalClause(OMPFinalClause *C) {
- Record.AddStmt(C->getCondition());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getNumThreads());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPSafelenClause(OMPSafelenClause *C) {
- Record.AddStmt(C->getSafelen());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
- Record.AddStmt(C->getSimdlen());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPCollapseClause(OMPCollapseClause *C) {
- Record.AddStmt(C->getNumForLoops());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
- Record.push_back(C->getDefaultKind());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getDefaultKindKwLoc());
-}
-
-void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) {
- Record.push_back(C->getProcBindKind());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getProcBindKindKwLoc());
-}
-
-void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.push_back(C->getScheduleKind());
- Record.push_back(C->getFirstScheduleModifier());
- Record.push_back(C->getSecondScheduleModifier());
- Record.AddStmt(C->getChunkSize());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getFirstScheduleModifierLoc());
- Record.AddSourceLocation(C->getSecondScheduleModifierLoc());
- Record.AddSourceLocation(C->getScheduleKindLoc());
- Record.AddSourceLocation(C->getCommaLoc());
-}
-
-void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *C) {
- Record.AddStmt(C->getNumForLoops());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPNowaitClause(OMPNowaitClause *) {}
-
-void OMPClauseWriter::VisitOMPUntiedClause(OMPUntiedClause *) {}
-
-void OMPClauseWriter::VisitOMPMergeableClause(OMPMergeableClause *) {}
-
-void OMPClauseWriter::VisitOMPReadClause(OMPReadClause *) {}
-
-void OMPClauseWriter::VisitOMPWriteClause(OMPWriteClause *) {}
-
-void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *) {}
-
-void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
-
-void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
-
-void OMPClauseWriter::VisitOMPThreadsClause(OMPThreadsClause *) {}
-
-void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
-
-void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
-
-void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->private_copies()) {
- Record.AddStmt(VE);
- }
-}
-
-void OMPClauseWriter::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPreInit(C);
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->private_copies()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->inits()) {
- Record.AddStmt(VE);
- }
-}
-
-void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPostUpdate(C);
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *E : C->private_copies())
- Record.AddStmt(E);
- for (auto *E : C->source_exprs())
- Record.AddStmt(E);
- for (auto *E : C->destination_exprs())
- Record.AddStmt(E);
- for (auto *E : C->assignment_ops())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
-}
-
-void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPostUpdate(C);
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getColonLoc());
- Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
- Record.AddDeclarationNameInfo(C->getNameInfo());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *VE : C->privates())
- Record.AddStmt(VE);
- for (auto *E : C->lhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->rhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->reduction_ops())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPTaskReductionClause(OMPTaskReductionClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPostUpdate(C);
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getColonLoc());
- Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
- Record.AddDeclarationNameInfo(C->getNameInfo());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *VE : C->privates())
- Record.AddStmt(VE);
- for (auto *E : C->lhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->rhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->reduction_ops())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPInReductionClause(OMPInReductionClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPostUpdate(C);
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getColonLoc());
- Record.AddNestedNameSpecifierLoc(C->getQualifierLoc());
- Record.AddDeclarationNameInfo(C->getNameInfo());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *VE : C->privates())
- Record.AddStmt(VE);
- for (auto *E : C->lhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->rhs_exprs())
- Record.AddStmt(E);
- for (auto *E : C->reduction_ops())
- Record.AddStmt(E);
- for (auto *E : C->taskgroup_descriptors())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPLinearClause(OMPLinearClause *C) {
- Record.push_back(C->varlist_size());
- VisitOMPClauseWithPostUpdate(C);
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getColonLoc());
- Record.push_back(C->getModifier());
- Record.AddSourceLocation(C->getModifierLoc());
- for (auto *VE : C->varlists()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->privates()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->inits()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->updates()) {
- Record.AddStmt(VE);
- }
- for (auto *VE : C->finals()) {
- Record.AddStmt(VE);
- }
- Record.AddStmt(C->getStep());
- Record.AddStmt(C->getCalcStep());
-}
-
-void OMPClauseWriter::VisitOMPAlignedClause(OMPAlignedClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getColonLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- Record.AddStmt(C->getAlignment());
-}
-
-void OMPClauseWriter::VisitOMPCopyinClause(OMPCopyinClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *E : C->source_exprs())
- Record.AddStmt(E);
- for (auto *E : C->destination_exprs())
- Record.AddStmt(E);
- for (auto *E : C->assignment_ops())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- for (auto *E : C->source_exprs())
- Record.AddStmt(E);
- for (auto *E : C->destination_exprs())
- Record.AddStmt(E);
- for (auto *E : C->assignment_ops())
- Record.AddStmt(E);
-}
-
-void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
-}
-
-void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
- Record.push_back(C->varlist_size());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.push_back(C->getDependencyKind());
- Record.AddSourceLocation(C->getDependencyLoc());
- Record.AddSourceLocation(C->getColonLoc());
- for (auto *VE : C->varlists())
- Record.AddStmt(VE);
- Record.AddStmt(C->getCounterValue());
-}
-
-void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getDevice());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
- Record.push_back(C->varlist_size());
- Record.push_back(C->getUniqueDeclarationsNum());
- Record.push_back(C->getTotalComponentListNum());
- Record.push_back(C->getTotalComponentsNum());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.push_back(C->getMapTypeModifier());
- Record.push_back(C->getMapType());
- Record.AddSourceLocation(C->getMapLoc());
- Record.AddSourceLocation(C->getColonLoc());
- for (auto *E : C->varlists())
- Record.AddStmt(E);
- for (auto *D : C->all_decls())
- Record.AddDeclRef(D);
- for (auto N : C->all_num_lists())
- Record.push_back(N);
- for (auto N : C->all_lists_sizes())
- Record.push_back(N);
- for (auto &M : C->all_components()) {
- Record.AddStmt(M.getAssociatedExpression());
- Record.AddDeclRef(M.getAssociatedDeclaration());
- }
-}
-
-void OMPClauseWriter::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getNumTeams());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.AddStmt(C->getThreadLimit());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPPriorityClause(OMPPriorityClause *C) {
- Record.AddStmt(C->getPriority());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
- Record.AddStmt(C->getGrainsize());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
- Record.AddStmt(C->getNumTasks());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPHintClause(OMPHintClause *C) {
- Record.AddStmt(C->getHint());
- Record.AddSourceLocation(C->getLParenLoc());
-}
-
-void OMPClauseWriter::VisitOMPDistScheduleClause(OMPDistScheduleClause *C) {
- VisitOMPClauseWithPreInit(C);
- Record.push_back(C->getDistScheduleKind());
- Record.AddStmt(C->getChunkSize());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getDistScheduleKindLoc());
- Record.AddSourceLocation(C->getCommaLoc());
-}
-
-void OMPClauseWriter::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
- Record.push_back(C->getDefaultmapKind());
- Record.push_back(C->getDefaultmapModifier());
- Record.AddSourceLocation(C->getLParenLoc());
- Record.AddSourceLocation(C->getDefaultmapModifierLoc());
- Record.AddSourceLocation(C->getDefaultmapKindLoc());
-}
-
-void OMPClauseWriter::VisitOMPToClause(OMPToClause *C) {
- Record.push_back(C->varlist_size());
- Record.push_back(C->getUniqueDeclarationsNum());
- Record.push_back(C->getTotalComponentListNum());
- Record.push_back(C->getTotalComponentsNum());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *E : C->varlists())
- Record.AddStmt(E);
- for (auto *D : C->all_decls())
- Record.AddDeclRef(D);
- for (auto N : C->all_num_lists())
- Record.push_back(N);
- for (auto N : C->all_lists_sizes())
- Record.push_back(N);
- for (auto &M : C->all_components()) {
- Record.AddStmt(M.getAssociatedExpression());
- Record.AddDeclRef(M.getAssociatedDeclaration());
- }
-}
-
-void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
- Record.push_back(C->varlist_size());
- Record.push_back(C->getUniqueDeclarationsNum());
- Record.push_back(C->getTotalComponentListNum());
- Record.push_back(C->getTotalComponentsNum());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *E : C->varlists())
- Record.AddStmt(E);
- for (auto *D : C->all_decls())
- Record.AddDeclRef(D);
- for (auto N : C->all_num_lists())
- Record.push_back(N);
- for (auto N : C->all_lists_sizes())
- Record.push_back(N);
- for (auto &M : C->all_components()) {
- Record.AddStmt(M.getAssociatedExpression());
- Record.AddDeclRef(M.getAssociatedDeclaration());
- }
-}
-
-void OMPClauseWriter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
- Record.push_back(C->varlist_size());
- Record.push_back(C->getUniqueDeclarationsNum());
- Record.push_back(C->getTotalComponentListNum());
- Record.push_back(C->getTotalComponentsNum());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *E : C->varlists())
- Record.AddStmt(E);
- for (auto *VE : C->private_copies())
- Record.AddStmt(VE);
- for (auto *VE : C->inits())
- Record.AddStmt(VE);
- for (auto *D : C->all_decls())
- Record.AddDeclRef(D);
- for (auto N : C->all_num_lists())
- Record.push_back(N);
- for (auto N : C->all_lists_sizes())
- Record.push_back(N);
- for (auto &M : C->all_components()) {
- Record.AddStmt(M.getAssociatedExpression());
- Record.AddDeclRef(M.getAssociatedDeclaration());
- }
-}
-
-void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
- Record.push_back(C->varlist_size());
- Record.push_back(C->getUniqueDeclarationsNum());
- Record.push_back(C->getTotalComponentListNum());
- Record.push_back(C->getTotalComponentsNum());
- Record.AddSourceLocation(C->getLParenLoc());
- for (auto *E : C->varlists())
- Record.AddStmt(E);
- for (auto *D : C->all_decls())
- Record.AddDeclRef(D);
- for (auto N : C->all_num_lists())
- Record.push_back(N);
- for (auto N : C->all_lists_sizes())
- Record.push_back(N);
- for (auto &M : C->all_components()) {
- Record.AddStmt(M.getAssociatedExpression());
- Record.AddDeclRef(M.getAssociatedDeclaration());
- }
-}
-
-//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
- Record.AddSourceLocation(E->getLocStart());
- Record.AddSourceLocation(E->getLocEnd());
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
OMPClauseWriter ClauseWriter(Record);
for (unsigned i = 0; i < E->getNumClauses(); ++i) {
ClauseWriter.writeClause(E->getClause(i));
@@ -2325,6 +1924,8 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
Record.AddStmt(D->getCombinedCond());
Record.AddStmt(D->getCombinedNextLowerBound());
Record.AddStmt(D->getCombinedNextUpperBound());
+ Record.AddStmt(D->getCombinedDistCond());
+ Record.AddStmt(D->getCombinedParForInDistCond());
}
for (auto I : D->counters()) {
Record.AddStmt(I);
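
The OMPClauseWriter removed from this file is generated with an X-macro over clang/Basic/OpenMPKinds.def, one Visit##Class method per clause. A toy version of that pattern with a hypothetical clause list; MY_CLAUSE_LIST and the clause names below are made up for illustration.

#include <iostream>

#define MY_CLAUSE_LIST(X) \
  X(IfClause)             \
  X(PrivateClause)        \
  X(SharedClause)

// One empty tag type per clause, generated from the list.
#define DECLARE(Name) struct Name {};
MY_CLAUSE_LIST(DECLARE)
#undef DECLARE

// One Visit##Name method per clause, also generated from the list.
struct ClauseWriter {
#define DECLARE_VISIT(Name) \
  void Visit##Name(const Name &) { std::cout << "wrote " #Name "\n"; }
  MY_CLAUSE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

int main() {
  ClauseWriter W;
  W.VisitIfClause(IfClause{});
  W.VisitSharedClause(SharedClause{});
  return 0;
}

Adding a clause to the central .def-style list grows both the types and the visitor without hand-written boilerplate, which is why the writer can move files without restating the clause set.
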
diff --git a/lib/Serialization/CMakeLists.txt b/lib/Serialization/CMakeLists.txt
index 95b33c388c56..a312cb91eb0d 100644
--- a/lib/Serialization/CMakeLists.txt
+++ b/lib/Serialization/CMakeLists.txt
@@ -17,6 +17,7 @@ add_clang_library(clangSerialization
Module.cpp
ModuleFileExtension.cpp
ModuleManager.cpp
+ PCHContainerOperations.cpp
ADDITIONAL_HEADERS
ASTCommon.h
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 3733638d2977..e7642a38924d 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -12,12 +12,12 @@
//===----------------------------------------------------------------------===//
#include "ASTReaderInternals.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Basic/FileManager.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 57ebaca10c99..54e0c08c5bc9 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -16,12 +16,11 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/MemoryBufferCache.h"
-#include "clang/Basic/VirtualFileSystem.h"
-#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/Module.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -33,6 +32,7 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <algorithm>
#include <cassert>
#include <memory>
@@ -150,7 +150,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
if (NewModule->Kind == MK_ImplicitModule) {
std::string TimestampFilename = NewModule->getTimestampFilename();
- vfs::Status Status;
+ llvm::vfs::Status Status;
// A cached stat value would be fine as well.
if (!FileMgr.getNoncachedStatValue(TimestampFilename, Status))
NewModule->InputFilesValidationTimestamp =
@@ -161,21 +161,24 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
if (std::unique_ptr<llvm::MemoryBuffer> Buffer = lookupBuffer(FileName)) {
// The buffer was already provided for us.
NewModule->Buffer = &PCMCache->addBuffer(FileName, std::move(Buffer));
+ // Since the cached buffer is reused, it is safe to close the file
+ // descriptor that was opened while stat()ing the PCM in
+ // lookupModuleFile() above; it won't be needed any longer.
+ Entry->closeFile();
} else if (llvm::MemoryBuffer *Buffer = PCMCache->lookupBuffer(FileName)) {
NewModule->Buffer = Buffer;
+ // As above, the file descriptor is no longer needed.
+ Entry->closeFile();
} else {
// Open the AST file.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf((std::error_code()));
if (FileName == "-") {
Buf = llvm::MemoryBuffer::getSTDIN();
} else {
- // Leave the FileEntry open so if it gets read again by another
- // ModuleManager it must be the same underlying file.
- // FIXME: Because FileManager::getFile() doesn't guarantee that it will
- // give us an open file, this may not be 100% reliable.
+ // Get a buffer for the file and close the file descriptor when done.
Buf = FileMgr.getBufferForFile(NewModule->File,
/*IsVolatile=*/false,
- /*ShouldClose=*/false);
+ /*ShouldClose=*/true);
}
if (!Buf) {
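
The ModuleManager hunk above closes the freshly opened file descriptor as soon as the PCM contents are found in a cache, and only keeps reading when no cached buffer exists. A hedged standard-library sketch of that idea; the real code goes through FileManager and MemoryBuffer, and BufferCache/getBuffer here are illustrative names.

#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

static std::map<std::string, std::string> BufferCache;

const std::string &getBuffer(const std::string &Path) {
  auto It = BufferCache.find(Path);
  std::ifstream File(Path, std::ios::binary); // handle opened while probing the file
  if (It != BufferCache.end()) {
    File.close(); // cached contents are reused; the handle is not needed any longer
    return It->second;
  }
  std::ostringstream OS;
  OS << File.rdbuf(); // read the file; the stream closes when it goes out of scope
  return BufferCache[Path] = OS.str();
}

int main(int argc, char **argv) {
  if (argc > 1)
    std::cout << getBuffer(argv[1]).size() << " bytes\n";
  return 0;
}
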
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Serialization/PCHContainerOperations.cpp
index 340e8ce63ff4..fbc613efeb63 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Serialization/PCHContainerOperations.cpp
@@ -1,4 +1,4 @@
-//===--- Frontend/PCHContainerOperations.cpp - PCH Containers ---*- C++ -*-===//
+//=== Serialization/PCHContainerOperations.cpp - PCH Containers -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PCHContainerOperations.h"
+#include "clang/Serialization/PCHContainerOperations.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Lex/ModuleLoader.h"
#include "llvm/Bitcode/BitstreamReader.h"
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
deleted file mode 100644
index 3dec8a58c929..000000000000
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Declares the configuration functions for leaks/allocation diagnostics.
-//
-//===--------------------------
-
-#include "AllocationDiagnostics.h"
-
-namespace clang {
-namespace ento {
-
-bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
- return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
- false);
-}
-
-}}
diff --git a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
deleted file mode 100644
index 62b7fab0739a..000000000000
--- a/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//=--- AllocationDiagnostics.h - Config options for allocation diags *- C++ -*-//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Declares the configuration functions for leaks/allocation diagnostics.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ALLOCATIONDIAGNOSTICS_H
-
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-
-namespace clang { namespace ento {
-
-/// Returns true if leak diagnostics should directly reference
-/// the allocatin site (where possible).
-///
-/// The default is false.
-///
-bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts);
-
-}}
-
-#endif
-
diff --git a/lib/StaticAnalyzer/Checkers/AllocationState.h b/lib/StaticAnalyzer/Checkers/AllocationState.h
index a6908bd7a651..c8193f77f928 100644
--- a/lib/StaticAnalyzer/Checkers/AllocationState.h
+++ b/lib/StaticAnalyzer/Checkers/AllocationState.h
@@ -26,6 +26,11 @@ ProgramStateRef markReleased(ProgramStateRef State, SymbolRef Sym,
/// AF_InnerBuffer symbols.
std::unique_ptr<BugReporterVisitor> getInnerPointerBRVisitor(SymbolRef Sym);
+/// 'Sym' represents a pointer to the inner buffer of a container object.
+/// This function looks up the memory region of that object in
+/// DanglingInternalBufferChecker's program state map.
+const MemRegion *getContainerObjRegion(ProgramStateRef State, SymbolRef Sym);
+
} // end namespace allocation_state
} // end namespace ento
diff --git a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
index e4cdc500de6a..b5d0f6620a1d 100644
--- a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -14,8 +14,9 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/CFGStmtMap.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -37,14 +38,15 @@ class AnalysisOrderChecker
check::PostStmt<OffsetOfExpr>,
check::PreCall,
check::PostCall,
+ check::EndFunction,
check::NewAllocator,
check::Bind,
check::RegionChanges,
check::LiveSymbols> {
bool isCallbackEnabled(AnalyzerOptions &Opts, StringRef CallbackName) const {
- return Opts.getBooleanOption("*", false, this) ||
- Opts.getBooleanOption(CallbackName, false, this);
+ return Opts.getCheckerBooleanOption("*", false, this) ||
+ Opts.getCheckerBooleanOption(CallbackName, false, this);
}
bool isCallbackEnabled(CheckerContext &C, StringRef CallbackName) const {
@@ -54,7 +56,7 @@ class AnalysisOrderChecker
bool isCallbackEnabled(ProgramStateRef State, StringRef CallbackName) const {
AnalyzerOptions &Opts = State->getStateManager().getOwningEngine()
- ->getAnalysisManager().getAnalyzerOptions();
+ .getAnalysisManager().getAnalyzerOptions();
return isCallbackEnabled(Opts, CallbackName);
}
@@ -121,6 +123,23 @@ public:
}
}
+ void checkEndFunction(const ReturnStmt *S, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "EndFunction")) {
+ llvm::errs() << "EndFunction\nReturnStmt: " << (S ? "yes" : "no") << "\n";
+ if (!S)
+ return;
+
+ llvm::errs() << "CFGElement: ";
+ CFGStmtMap *Map = C.getCurrentAnalysisDeclContext()->getCFGStmtMap();
+ CFGElement LastElement = Map->getBlock(S)->back();
+
+ if (LastElement.getAs<CFGStmt>())
+ llvm::errs() << "CFGStmt\n";
+ else if (LastElement.getAs<CFGAutomaticObjDtor>())
+ llvm::errs() << "CFGAutomaticObjDtor\n";
+ }
+ }
+
void checkNewAllocator(const CXXNewExpr *CNE, SVal Target,
CheckerContext &C) const {
if (isCallbackEnabled(C, "NewAllocator"))
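
The new checkEndFunction callback above is gated the same way as the others: it runs when either the wildcard "*" option or its own named option is enabled. A minimal sketch of that gating, assuming a plain map of booleans in place of AnalyzerOptions.

#include <iostream>
#include <map>
#include <string>

// Stand-in for getCheckerBooleanOption: missing keys default to false.
static bool isCallbackEnabled(const std::map<std::string, bool> &Opts,
                              const std::string &Name) {
  auto Get = [&](const std::string &Key) {
    auto It = Opts.find(Key);
    return It != Opts.end() && It->second;
  };
  return Get("*") || Get(Name);
}

int main() {
  std::map<std::string, bool> Opts = {{"EndFunction", true}};
  std::cout << isCallbackEnabled(Opts, "EndFunction") << "\n"; // 1
  std::cout << isCallbackEnabled(Opts, "PreCall") << "\n";     // 0
  Opts["*"] = true;
  std::cout << isCallbackEnabled(Opts, "PreCall") << "\n";     // 1: wildcard enables all
  return 0;
}
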
diff --git a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index aadc6bac8d00..5e01012401b2 100644
--- a/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -8,7 +8,7 @@
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index c092610afe2b..20f3092fdba4 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 933380d494a4..26887be9f258 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 7d6358acbbac..577b5349f62e 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -13,14 +13,14 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
-#include "SelectorExtras.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Analysis/SelectorExtras.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -36,6 +36,7 @@
using namespace clang;
using namespace ento;
+using namespace llvm;
namespace {
class APIMisuse : public BugType {
@@ -156,6 +157,11 @@ void NilArgChecker::warnIfNilArg(CheckerContext &C,
if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
return;
+ // NOTE: We cannot throw non-fatal errors from warnIfNilExpr,
+ // because it's called multiple times from some callers, so it'd cause
+ // an unwanted state split if two or more non-fatal errors are thrown
+ // within the same checker callback. For now we don't want to throw
+ // non-fatal errors here, but this will need to be fixed if we ever do.
if (ExplodedNode *N = C.generateErrorNode()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
@@ -208,7 +214,7 @@ void NilArgChecker::generateBugReport(ExplodedNode *N,
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
R->addRange(Range);
- bugreporter::trackNullOrUndefValue(N, E, *R);
+ bugreporter::trackExpressionValue(N, E, *R);
C.emitReport(std::move(R));
}
@@ -526,93 +532,59 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
//===----------------------------------------------------------------------===//
namespace {
-class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable std::unique_ptr<APIMisuse> BT;
- mutable IdentifierInfo *Retain, *Release, *MakeCollectable, *Autorelease;
+class CFRetainReleaseChecker : public Checker<check::PreCall> {
+ mutable APIMisuse BT{this, "null passed to CF memory management function"};
+ CallDescription CFRetain{"CFRetain", 1},
+ CFRelease{"CFRelease", 1},
+ CFMakeCollectable{"CFMakeCollectable", 1},
+ CFAutorelease{"CFAutorelease", 1};
public:
- CFRetainReleaseChecker()
- : Retain(nullptr), Release(nullptr), MakeCollectable(nullptr),
- Autorelease(nullptr) {}
- void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
};
} // end anonymous namespace
-void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
+void CFRetainReleaseChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- // If the CallExpr doesn't have exactly 1 argument just give up checking.
- if (CE->getNumArgs() != 1)
+ // TODO: Make this check part of CallDescription.
+ if (!Call.isGlobalCFunction())
return;
- ProgramStateRef state = C.getState();
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
- return;
-
- if (!BT) {
- ASTContext &Ctx = C.getASTContext();
- Retain = &Ctx.Idents.get("CFRetain");
- Release = &Ctx.Idents.get("CFRelease");
- MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
- Autorelease = &Ctx.Idents.get("CFAutorelease");
- BT.reset(new APIMisuse(
- this, "null passed to CF memory management function"));
- }
-
// Check if we called CFRetain/CFRelease/CFMakeCollectable/CFAutorelease.
- const IdentifierInfo *FuncII = FD->getIdentifier();
- if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable ||
- FuncII == Autorelease))
+ if (!(Call.isCalled(CFRetain) || Call.isCalled(CFRelease) ||
+ Call.isCalled(CFMakeCollectable) || Call.isCalled(CFAutorelease)))
return;
- // FIXME: The rest of this just checks that the argument is non-null.
- // It should probably be refactored and combined with NonNullParamChecker.
-
// Get the argument's value.
- const Expr *Arg = CE->getArg(0);
- SVal ArgVal = C.getSVal(Arg);
+ SVal ArgVal = Call.getArgSVal(0);
Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
if (!DefArgVal)
return;
- // Get a NULL value.
- SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedSVal zero =
- svalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
-
- // Make an expression asserting that they're equal.
- DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
-
- // Are they equal?
- ProgramStateRef stateTrue, stateFalse;
- std::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+ // Is it null?
+ ProgramStateRef state = C.getState();
+ ProgramStateRef stateNonNull, stateNull;
+ std::tie(stateNonNull, stateNull) = state->assume(*DefArgVal);
- if (stateTrue && !stateFalse) {
- ExplodedNode *N = C.generateErrorNode(stateTrue);
+ if (!stateNonNull) {
+ ExplodedNode *N = C.generateErrorNode(stateNull);
if (!N)
return;
- const char *description;
- if (FuncII == Retain)
- description = "Null pointer argument in call to CFRetain";
- else if (FuncII == Release)
- description = "Null pointer argument in call to CFRelease";
- else if (FuncII == MakeCollectable)
- description = "Null pointer argument in call to CFMakeCollectable";
- else if (FuncII == Autorelease)
- description = "Null pointer argument in call to CFAutorelease";
- else
- llvm_unreachable("impossible case");
+ SmallString<64> Str;
+ raw_svector_ostream OS(Str);
+ OS << "Null pointer argument in call to "
+ << cast<FunctionDecl>(Call.getDecl())->getName();
- auto report = llvm::make_unique<BugReport>(*BT, description, N);
- report->addRange(Arg->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, Arg, *report);
+ auto report = llvm::make_unique<BugReport>(BT, OS.str(), N);
+ report->addRange(Call.getArgSourceRange(0));
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *report);
C.emitReport(std::move(report));
return;
}
// From here on, we know the argument is non-null.
- C.addTransition(stateFalse);
+ C.addTransition(stateNonNull);
}
//===----------------------------------------------------------------------===//
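
The CFRetainReleaseChecker rewrite above replaces hand-cached IdentifierInfo pointers with declarative CallDescription objects matched by name and arity. An illustrative analogue in plain C++; CallDesc and Call are hypothetical stand-ins, not the analyzer API.

#include <iostream>
#include <string>
#include <vector>

// Declarative description of a call site: function name plus argument count.
struct CallDesc {
  std::string Name;
  unsigned NumArgs;
};

struct Call {
  std::string Callee;
  std::vector<std::string> Args;
  bool isCalled(const CallDesc &D) const {
    return Callee == D.Name && Args.size() == D.NumArgs;
  }
};

int main() {
  const CallDesc CFRetain{"CFRetain", 1}, CFRelease{"CFRelease", 1};
  Call C{"CFRelease", {"ptr"}};
  if (C.isCalled(CFRetain) || C.isCalled(CFRelease))
    std::cout << "Null pointer argument in call to " << C.Callee << "\n";
  return 0;
}

Matching on a description rather than cached identifiers also removes the per-function message strings: the diagnostic can simply name the matched callee, as the rewritten checker does with Call.getDecl().
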
@@ -828,7 +800,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
//===----------------------------------------------------------------------===//
// The map from container symbol to the container count symbol.
-// We currently will remember the last countainer count symbol encountered.
+// We currently will remember the last container count symbol encountered.
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerCountMap, SymbolRef, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerNonEmptyMap, SymbolRef, bool)
diff --git a/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index c31f2794df6a..00d08b371f37 100644
--- a/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
diff --git a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index f26f73129e78..3008eddd397e 100644
--- a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 0e781d08e24c..f98027942e18 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -101,9 +101,10 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
// This must be resolvable at compile time, so we defer to the constant
// evaluator for a value.
SVal V = UnknownVal();
- llvm::APSInt Result;
- if (CE->EvaluateAsInt(Result, C.getASTContext(), Expr::SE_NoSideEffects)) {
+ Expr::EvalResult EVResult;
+ if (CE->EvaluateAsInt(EVResult, C.getASTContext(), Expr::SE_NoSideEffects)) {
// Make sure the result has the correct type.
+ llvm::APSInt Result = EVResult.Val.getInt();
SValBuilder &SVB = C.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
BVF.getAPSIntType(CE->getType()).apply(Result);
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 5bb4770b5675..10fb0bd3536c 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -3,7 +3,6 @@ set(LLVM_LINK_COMPONENTS
)
add_clang_library(clangStaticAnalyzerCheckers
- AllocationDiagnostics.cpp
AnalysisOrderChecker.cpp
AnalyzerStatsChecker.cpp
ArrayBoundChecker.cpp
@@ -23,7 +22,6 @@ add_clang_library(clangStaticAnalyzerCheckers
CheckSizeofPointer.cpp
CheckerDocumentation.cpp
ChrootChecker.cpp
- ClangCheckers.cpp
CloneChecker.cpp
ConversionChecker.cpp
CXXSelfAssignmentChecker.cpp
@@ -35,6 +33,7 @@ add_clang_library(clangStaticAnalyzerCheckers
DivZeroChecker.cpp
DynamicTypePropagation.cpp
DynamicTypeChecker.cpp
+ EnumCastOutOfRangeChecker.cpp
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
GCDAntipatternChecker.cpp
@@ -52,7 +51,7 @@ add_clang_library(clangStaticAnalyzerCheckers
MallocOverflowSecurityChecker.cpp
MallocSizeofChecker.cpp
MmapWriteExecChecker.cpp
- MisusedMovedObjectChecker.cpp
+ MoveChecker.cpp
MPI-Checker/MPIBugReporter.cpp
MPI-Checker/MPIChecker.cpp
MPI-Checker/MPIFunctionClassifier.cpp
@@ -76,7 +75,8 @@ add_clang_library(clangStaticAnalyzerCheckers
PointerArithChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
- RetainCountChecker.cpp
+ RetainCountChecker/RetainCountChecker.cpp
+ RetainCountChecker/RetainCountDiagnostics.cpp
ReturnPointerRangeChecker.cpp
ReturnUndefChecker.cpp
RunLoopAutoreleaseLeakChecker.cpp
@@ -93,7 +93,8 @@ add_clang_library(clangStaticAnalyzerCheckers
UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
- UninitializedObjectChecker.cpp
+ UninitializedObject/UninitializedObjectChecker.cpp
+ UninitializedObject/UninitializedPointee.cpp
UnixAPIChecker.cpp
UnreachableCodeChecker.cpp
VforkChecker.cpp
@@ -101,9 +102,6 @@ add_clang_library(clangStaticAnalyzerCheckers
ValistChecker.cpp
VirtualCallChecker.cpp
- DEPENDS
- ClangSACheckers
-
LINK_LIBS
clangAST
clangASTMatchers
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 12a576e5d80d..8bffada69b9b 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "InterCheckerAPI.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -124,6 +124,7 @@ public:
void evalStdCopyBackward(CheckerContext &C, const CallExpr *CE) const;
void evalStdCopyCommon(CheckerContext &C, const CallExpr *CE) const;
void evalMemset(CheckerContext &C, const CallExpr *CE) const;
+ void evalBzero(CheckerContext &C, const CallExpr *CE) const;
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
@@ -158,7 +159,7 @@ public:
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
- static bool memsetAux(const Expr *DstBuffer, const Expr *CharE,
+ static bool memsetAux(const Expr *DstBuffer, SVal CharE,
const Expr *Size, CheckerContext &C,
ProgramStateRef &State);
@@ -187,7 +188,7 @@ public:
const Expr *Buf,
const char *message = nullptr,
bool WarnAboutSize = false) const {
- // This is a convenience override.
+ // This is a convenience overload.
return CheckBufferAccess(C, state, Size, Buf, nullptr, message, nullptr,
WarnAboutSize);
}
@@ -553,7 +554,8 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Null.get());
auto Report = llvm::make_unique<BugReport>(*BT, WarningMsg, N);
Report->addRange(S->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, S, *Report);
+ if (const auto *Ex = dyn_cast<Expr>(S))
+ bugreporter::trackExpressionValue(N, Ex, *Report);
C.emitReport(std::move(Report));
}
}
@@ -1004,11 +1006,10 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
}
}
-bool CStringChecker::memsetAux(const Expr *DstBuffer, const Expr *CharE,
+bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
const Expr *Size, CheckerContext &C,
ProgramStateRef &State) {
SVal MemVal = C.getSVal(DstBuffer);
- SVal CharVal = C.getSVal(CharE);
SVal SizeVal = C.getSVal(Size);
const MemRegion *MR = MemVal.getAsRegion();
if (!MR)
@@ -2183,13 +2184,59 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
// According to the values of the arguments, bind the value of the second
// argument to the destination buffer and set string length, or just
// invalidate the destination buffer.
- if (!memsetAux(Mem, CharE, Size, C, State))
+ if (!memsetAux(Mem, C.getSVal(CharE), Size, C, State))
return;
State = State->BindExpr(CE, LCtx, MemVal);
C.addTransition(State);
}
+void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() != 2)
+ return;
+
+ CurrentFunctionDescription = "memory clearance function";
+
+ const Expr *Mem = CE->getArg(0);
+ const Expr *Size = CE->getArg(1);
+ SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
+
+ ProgramStateRef State = C.getState();
+
+ // See if the size argument is zero.
+ SVal SizeVal = C.getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ ProgramStateRef StateZeroSize, StateNonZeroSize;
+ std::tie(StateZeroSize, StateNonZeroSize) =
+ assumeZero(C, State, SizeVal, SizeTy);
+
+ // If the size is zero, there won't be any actual memory access,
+ // so in this case we just return.
+ if (StateZeroSize && !StateNonZeroSize) {
+ C.addTransition(StateZeroSize);
+ return;
+ }
+
+ // Get the value of the memory area.
+ SVal MemVal = C.getSVal(Mem);
+
+ // Ensure the memory area is not null.
+ // If it is NULL there will be a NULL pointer dereference.
+ State = checkNonNull(C, StateNonZeroSize, Mem, MemVal);
+ if (!State)
+ return;
+
+ State = CheckBufferAccess(C, State, Size, Mem);
+ if (!State)
+ return;
+
+ if (!memsetAux(Mem, Zero, Size, C, State))
+ return;
+
+ C.addTransition(State);
+}
+
static bool isCPPStdLibraryFunction(const FunctionDecl *FD, StringRef Name) {
IdentifierInfo *II = FD->getIdentifier();
if (!II)
@@ -2207,60 +2254,86 @@ static bool isCPPStdLibraryFunction(const FunctionDecl *FD, StringRef Name) {
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
-bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+static CStringChecker::FnCheck identifyCall(const CallExpr *CE,
+ CheckerContext &C) {
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
-
if (!FDecl)
- return false;
+ return nullptr;
+
+ // Pro-actively check that argument types are safe to do arithmetic upon.
+ // We do not want to crash if someone accidentally passes a structure
+ // into, say, a C++ overload of any of these functions.
+ if (isCPPStdLibraryFunction(FDecl, "copy")) {
+ if (CE->getNumArgs() < 3 || !CE->getArg(2)->getType()->isPointerType())
+ return nullptr;
+ return &CStringChecker::evalStdCopy;
+ } else if (isCPPStdLibraryFunction(FDecl, "copy_backward")) {
+ if (CE->getNumArgs() < 3 || !CE->getArg(2)->getType()->isPointerType())
+ return nullptr;
+ return &CStringChecker::evalStdCopyBackward;
+ } else {
+ // An umbrella check for all C library functions.
+ for (auto I: CE->arguments()) {
+ QualType T = I->getType();
+ if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
+ return nullptr;
+ }
+ }
// FIXME: Poorly-factored string switches are slow.
- FnCheck evalFunction = nullptr;
if (C.isCLibraryFunction(FDecl, "memcpy"))
- evalFunction = &CStringChecker::evalMemcpy;
+ return &CStringChecker::evalMemcpy;
else if (C.isCLibraryFunction(FDecl, "mempcpy"))
- evalFunction = &CStringChecker::evalMempcpy;
+ return &CStringChecker::evalMempcpy;
else if (C.isCLibraryFunction(FDecl, "memcmp"))
- evalFunction = &CStringChecker::evalMemcmp;
+ return &CStringChecker::evalMemcmp;
else if (C.isCLibraryFunction(FDecl, "memmove"))
- evalFunction = &CStringChecker::evalMemmove;
- else if (C.isCLibraryFunction(FDecl, "memset"))
- evalFunction = &CStringChecker::evalMemset;
+ return &CStringChecker::evalMemmove;
+ else if (C.isCLibraryFunction(FDecl, "memset") ||
+ C.isCLibraryFunction(FDecl, "explicit_memset"))
+ return &CStringChecker::evalMemset;
else if (C.isCLibraryFunction(FDecl, "strcpy"))
- evalFunction = &CStringChecker::evalStrcpy;
+ return &CStringChecker::evalStrcpy;
else if (C.isCLibraryFunction(FDecl, "strncpy"))
- evalFunction = &CStringChecker::evalStrncpy;
+ return &CStringChecker::evalStrncpy;
else if (C.isCLibraryFunction(FDecl, "stpcpy"))
- evalFunction = &CStringChecker::evalStpcpy;
+ return &CStringChecker::evalStpcpy;
else if (C.isCLibraryFunction(FDecl, "strlcpy"))
- evalFunction = &CStringChecker::evalStrlcpy;
+ return &CStringChecker::evalStrlcpy;
else if (C.isCLibraryFunction(FDecl, "strcat"))
- evalFunction = &CStringChecker::evalStrcat;
+ return &CStringChecker::evalStrcat;
else if (C.isCLibraryFunction(FDecl, "strncat"))
- evalFunction = &CStringChecker::evalStrncat;
+ return &CStringChecker::evalStrncat;
else if (C.isCLibraryFunction(FDecl, "strlcat"))
- evalFunction = &CStringChecker::evalStrlcat;
+ return &CStringChecker::evalStrlcat;
else if (C.isCLibraryFunction(FDecl, "strlen"))
- evalFunction = &CStringChecker::evalstrLength;
+ return &CStringChecker::evalstrLength;
else if (C.isCLibraryFunction(FDecl, "strnlen"))
- evalFunction = &CStringChecker::evalstrnLength;
+ return &CStringChecker::evalstrnLength;
else if (C.isCLibraryFunction(FDecl, "strcmp"))
- evalFunction = &CStringChecker::evalStrcmp;
+ return &CStringChecker::evalStrcmp;
else if (C.isCLibraryFunction(FDecl, "strncmp"))
- evalFunction = &CStringChecker::evalStrncmp;
+ return &CStringChecker::evalStrncmp;
else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
- evalFunction = &CStringChecker::evalStrcasecmp;
+ return &CStringChecker::evalStrcasecmp;
else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
- evalFunction = &CStringChecker::evalStrncasecmp;
+ return &CStringChecker::evalStrncasecmp;
else if (C.isCLibraryFunction(FDecl, "strsep"))
- evalFunction = &CStringChecker::evalStrsep;
+ return &CStringChecker::evalStrsep;
else if (C.isCLibraryFunction(FDecl, "bcopy"))
- evalFunction = &CStringChecker::evalBcopy;
+ return &CStringChecker::evalBcopy;
else if (C.isCLibraryFunction(FDecl, "bcmp"))
- evalFunction = &CStringChecker::evalMemcmp;
- else if (isCPPStdLibraryFunction(FDecl, "copy"))
- evalFunction = &CStringChecker::evalStdCopy;
- else if (isCPPStdLibraryFunction(FDecl, "copy_backward"))
- evalFunction = &CStringChecker::evalStdCopyBackward;
+ return &CStringChecker::evalMemcmp;
+ else if (C.isCLibraryFunction(FDecl, "bzero") ||
+ C.isCLibraryFunction(FDecl, "explicit_bzero"))
+ return &CStringChecker::evalBzero;
+
+ return nullptr;
+}
+
+bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+
+ FnCheck evalFunction = identifyCall(CE, C);
// If the callee isn't a string function, let another checker handle it.
if (!evalFunction)
@@ -2384,9 +2457,6 @@ void CStringChecker::checkLiveSymbols(ProgramStateRef state,
void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
- if (!SR.hasDeadSymbols())
- return;
-
ProgramStateRef state = C.getState();
CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
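
For illustration, a minimal sketch of the call patterns the new evalBzero() handler above models (not part of the patch; the surrounding function is invented and the prototypes are declared inline so the snippet is self-contained):

    #include <stddef.h>
    void bzero(void *s, size_t n);          // declared here for the sketch
    void explicit_bzero(void *s, size_t n); // (normally provided by the C library)

    void clear_buffer(char *p, unsigned n) {
      explicit_bzero(p, n); // handled by the same evalBzero path as bzero
      bzero(p, 0);          // size is zero: no memory access is modeled
      bzero(nullptr, n);    // reported on the branch where n is non-zero,
                            // since the destination must be non-null
    }
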
diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
index 8b4aa857e775..bbeb41c5f3cf 100644
--- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -12,7 +12,7 @@
// of bytes to copy.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/StmtVisitor.h"
@@ -90,7 +90,16 @@ class WalkAST: public StmtVisitor<WalkAST> {
/// strlcpy(dst, "abcd", 4);
/// strlcpy(dst + 3, "abcd", 2);
/// strlcpy(dst, "abcd", cpy);
- bool containsBadStrlcpyPattern(const CallExpr *CE);
+ /// Identify erroneous patterns in the last argument to strlcat - the number
+ /// of bytes to copy.
+ /// The bad pattern checked is when the last argument is essentially
+ /// the destination buffer size, or a value greater than or
+ /// equal to it.
+ /// char dst[2];
+ /// strlcat(dst, src2, sizeof(dst));
+ /// strlcat(dst, src2, 2);
+ /// strlcat(dst, src2, 10);
+ bool containsBadStrlcpyStrlcatPattern(const CallExpr *CE);
public:
WalkAST(const CheckerBase *Checker, BugReporter &BR, AnalysisDeclContext *AC)
@@ -142,15 +151,19 @@ bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
return false;
}
-bool WalkAST::containsBadStrlcpyPattern(const CallExpr *CE) {
+bool WalkAST::containsBadStrlcpyStrlcatPattern(const CallExpr *CE) {
if (CE->getNumArgs() != 3)
return false;
+ const FunctionDecl *FD = CE->getDirectCallee();
+ bool Append = CheckerContext::isCLibraryFunction(FD, "strlcat");
const Expr *DstArg = CE->getArg(0);
const Expr *LenArg = CE->getArg(2);
const auto *DstArgDecl = dyn_cast<DeclRefExpr>(DstArg->IgnoreParenImpCasts());
const auto *LenArgDecl = dyn_cast<DeclRefExpr>(LenArg->IgnoreParenLValueCasts());
uint64_t DstOff = 0;
+ if (isSizeof(LenArg, DstArg))
+ return false;
// - size_t dstlen = sizeof(dst)
if (LenArgDecl) {
const auto *LenArgVal = dyn_cast<VarDecl>(LenArgDecl->getDecl());
@@ -181,8 +194,14 @@ bool WalkAST::containsBadStrlcpyPattern(const CallExpr *CE) {
if (const auto *Buffer = dyn_cast<ConstantArrayType>(DstArgDecl->getType())) {
ASTContext &C = BR.getContext();
uint64_t BufferLen = C.getTypeSize(Buffer) / 8;
- if ((BufferLen - DstOff) < ILRawVal)
- return true;
+ auto RemainingBufferLen = BufferLen - DstOff;
+ if (Append) {
+ if (RemainingBufferLen <= ILRawVal)
+ return true;
+ } else {
+ if (RemainingBufferLen < ILRawVal)
+ return true;
+ }
}
}
}
@@ -219,8 +238,9 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
"C String API", os.str(), Loc,
LenArg->getSourceRange());
}
- } else if (CheckerContext::isCLibraryFunction(FD, "strlcpy")) {
- if (containsBadStrlcpyPattern(CE)) {
+ } else if (CheckerContext::isCLibraryFunction(FD, "strlcpy") ||
+ CheckerContext::isCLibraryFunction(FD, "strlcat")) {
+ if (containsBadStrlcpyStrlcatPattern(CE)) {
const Expr *DstArg = CE->getArg(0);
const Expr *LenArg = CE->getArg(2);
PathDiagnosticLocation Loc =
@@ -230,13 +250,17 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
SmallString<256> S;
llvm::raw_svector_ostream os(S);
- os << "The third argument is larger than the size of the input buffer. ";
+ os << "The third argument allows to potentially copy more bytes than it should. ";
+ os << "Replace with the value ";
if (!DstName.empty())
- os << "Replace with the value 'sizeof(" << DstName << ")` or lower";
+ os << "sizeof(" << DstName << ")";
+ else
+ os << "sizeof(<destination buffer>)";
+ os << " or lower";
BR.EmitBasicReport(FD, Checker, "Anti-pattern in the argument",
- "C String API", os.str(), Loc,
- LenArg->getSourceRange());
+ "C String API", os.str(), Loc,
+ LenArg->getSourceRange());
}
}
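
To illustrate the strlcpy/strlcat distinction introduced above (strict < for the copy case, <= for the append case), a hedged sketch of what the syntactic check now reports; the prototypes are declared inline since strlcpy/strlcat are BSD extensions:

    #include <stddef.h>
    size_t strlcpy(char *dst, const char *src, size_t size);
    size_t strlcat(char *dst, const char *src, size_t size);

    void f(const char *src) {
      char dst[10];
      strlcpy(dst, src, 10); // not reported: the length does not exceed the buffer
      strlcat(dst, src, 10); // reported: for the append case, a length equal to the
                             // remaining buffer size also matches the anti-pattern
    }
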
diff --git a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
index d1d37c75dfcc..0b539e1188eb 100644
--- a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -18,7 +18,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 20a46843e23e..ef30dc74c39d 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -108,7 +108,7 @@ void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
R->addRange(BadE->getSourceRange());
if (BadE->isGLValue())
BadE = bugreporter::getDerefExpr(BadE);
- bugreporter::trackNullOrUndefValue(N, BadE, *R);
+ bugreporter::trackExpressionValue(N, BadE, *R);
}
C.emitReport(std::move(R));
}
@@ -185,9 +185,9 @@ bool CallAndMessageChecker::uninitRefOrPointer(
LazyInit_BT(BD, BT);
auto R = llvm::make_unique<BugReport>(*BT, Os.str(), N);
R->addRange(ArgRange);
- if (ArgEx) {
- bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
- }
+ if (ArgEx)
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
+
C.emitReport(std::move(R));
}
return true;
@@ -196,6 +196,47 @@ bool CallAndMessageChecker::uninitRefOrPointer(
return false;
}
+namespace {
+class FindUninitializedField {
+public:
+ SmallVector<const FieldDecl *, 10> FieldChain;
+
+private:
+ StoreManager &StoreMgr;
+ MemRegionManager &MrMgr;
+ Store store;
+
+public:
+ FindUninitializedField(StoreManager &storeMgr, MemRegionManager &mrMgr,
+ Store s)
+ : StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+
+ bool Find(const TypedValueRegion *R) {
+ QualType T = R->getValueType();
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ assert(RD && "Referred record has no definition");
+ for (const auto *I : RD->fields()) {
+ const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
+ FieldChain.push_back(I);
+ T = I->getType();
+ if (T->getAsStructureType()) {
+ if (Find(FR))
+ return true;
+ } else {
+ const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
+ if (V.isUndef())
+ return true;
+ }
+ FieldChain.pop_back();
+ }
+ }
+
+ return false;
+ }
+};
+} // namespace
+
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
SVal V,
SourceRange ArgRange,
@@ -223,7 +264,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
R->addRange(ArgRange);
if (ArgEx)
- bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
C.emitReport(std::move(R));
}
return true;
@@ -232,47 +273,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
if (!CheckUninitFields)
return false;
- if (Optional<nonloc::LazyCompoundVal> LV =
- V.getAs<nonloc::LazyCompoundVal>()) {
-
- class FindUninitializedField {
- public:
- SmallVector<const FieldDecl *, 10> FieldChain;
- private:
- StoreManager &StoreMgr;
- MemRegionManager &MrMgr;
- Store store;
- public:
- FindUninitializedField(StoreManager &storeMgr,
- MemRegionManager &mrMgr, Store s)
- : StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
-
- bool Find(const TypedValueRegion *R) {
- QualType T = R->getValueType();
- if (const RecordType *RT = T->getAsStructureType()) {
- const RecordDecl *RD = RT->getDecl()->getDefinition();
- assert(RD && "Referred record has no definition");
- for (const auto *I : RD->fields()) {
- const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
- FieldChain.push_back(I);
- T = I->getType();
- if (T->getAsStructureType()) {
- if (Find(FR))
- return true;
- }
- else {
- const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
- if (V.isUndef())
- return true;
- }
- FieldChain.pop_back();
- }
- }
-
- return false;
- }
- };
-
+ if (auto LV = V.getAs<nonloc::LazyCompoundVal>()) {
const LazyCompoundValData *D = LV->getCVData();
FindUninitializedField F(C.getState()->getStateManager().getStoreManager(),
C.getSValBuilder().getRegionManager(),
@@ -305,6 +306,8 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
R->addRange(ArgRange);
+ if (ArgEx)
+ bugreporter::trackExpressionValue(N, ArgEx, *R);
// FIXME: enhance track back for uninitialized value for arbitrary
// memregions
C.emitReport(std::move(R));
@@ -364,7 +367,7 @@ void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
Desc = "Argument to 'delete' is uninitialized";
BugType *BT = BT_cxx_delete_undef.get();
auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
- bugreporter::trackNullOrUndefValue(N, DE, *R);
+ bugreporter::trackExpressionValue(N, DE, *R);
C.emitReport(std::move(R));
return;
}
@@ -493,7 +496,7 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
// FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
if (const Expr *ReceiverE = ME->getInstanceReceiver())
- bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
+ bugreporter::trackExpressionValue(N, ReceiverE, *R);
C.emitReport(std::move(R));
}
return;
@@ -534,7 +537,7 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
report->addRange(ME->getReceiverRange());
// FIXME: This won't track "self" in messages to super.
if (const Expr *receiver = ME->getInstanceReceiver()) {
- bugreporter::trackNullOrUndefValue(N, receiver, *report);
+ bugreporter::trackExpressionValue(N, receiver, *report);
}
C.emitReport(std::move(report));
}
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 059553b21995..5deb62d32311 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -11,7 +11,7 @@
// whether the size of the symbolic region is a multiple of the size of T.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index 00e903355720..2bd3879627cb 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index f4d2e32cef11..00a912f27a8d 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -28,7 +28,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
@@ -178,20 +178,12 @@ private:
};
} // End anonymous namespace.
-typedef llvm::ImmutableSet<SymbolRef> SymbolSet;
/// Maps from the symbol for a class instance to the set of
/// symbols remaining that must be released in -dealloc.
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(SymbolSet, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(UnreleasedIvarMap, SymbolRef, SymbolSet)
-namespace clang {
-namespace ento {
-template<> struct ProgramStateTrait<SymbolSet>
-: public ProgramStatePartialTrait<SymbolSet> {
- static void *GDMIndex() { static int index = 0; return &index; }
-};
-}
-}
/// An AST check that diagnose when the class requires a -dealloc method and
/// is missing one.
@@ -723,6 +715,10 @@ bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
const ObjCMethodCall &M,
CheckerContext &C) const {
+ // TODO: Apart from unknown/undefined receivers, this may happen when
+ // dealloc is called as a class method. Should we warn?
+ if (!DeallocedValue)
+ return false;
// Find the property backing the instance variable that M
// is dealloc'ing.
@@ -761,15 +757,15 @@ ObjCDeallocChecker::ObjCDeallocChecker()
MissingReleaseBugType.reset(
new BugType(this, "Missing ivar release (leak)",
- categories::MemoryCoreFoundationObjectiveC));
+ categories::MemoryRefCount));
ExtraReleaseBugType.reset(
new BugType(this, "Extra ivar release",
- categories::MemoryCoreFoundationObjectiveC));
+ categories::MemoryRefCount));
MistakenDeallocBugType.reset(
new BugType(this, "Mistaken dealloc",
- categories::MemoryCoreFoundationObjectiveC));
+ categories::MemoryRefCount));
}
void ObjCDeallocChecker::initIdentifierInfoAndSelectors(
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index cc4c0c3db846..fe6715595e6f 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Type.h"
diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 202233acffab..163ca9d8556f 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/TargetInfo.h"
@@ -29,10 +29,10 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
const llvm::Triple &T = Ctx.getTargetInfo().getTriple();
return T.getVendor() == llvm::Triple::Apple ||
T.getOS() == llvm::Triple::CloudABI ||
- T.getOS() == llvm::Triple::FreeBSD ||
- T.getOS() == llvm::Triple::NetBSD ||
- T.getOS() == llvm::Triple::OpenBSD ||
- T.getOS() == llvm::Triple::DragonFly;
+ T.isOSFreeBSD() ||
+ T.isOSNetBSD() ||
+ T.isOSOpenBSD() ||
+ T.isOSDragonFly();
}
namespace {
@@ -188,7 +188,7 @@ void WalkAST::VisitForStmt(ForStmt *FS) {
}
//===----------------------------------------------------------------------===//
-// Check: floating poing variable used as loop counter.
+// Check: floating point variable used as loop counter.
// Originally: <rdar://problem/6336718>
// Implements: CERT security coding advisory FLP-30.
//===----------------------------------------------------------------------===//
@@ -597,9 +597,10 @@ void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
unsigned suffix = 0;
if (ArgSuffix.second >= 0) {
const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
- llvm::APSInt Result;
- if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+ Expr::EvalResult EVResult;
+ if (!suffixEx->EvaluateAsInt(EVResult, BR.getContext()))
return;
+ llvm::APSInt Result = EVResult.Val.getInt();
// FIXME: Issue a warning.
if (Result.isNegative())
return;
@@ -650,14 +651,14 @@ void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
const auto *Target = CE->getArg(0)->IgnoreImpCasts(),
*Source = CE->getArg(1)->IgnoreImpCasts();
- if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Target))
- if (const auto *Array = dyn_cast<ConstantArrayType>(DeclRef->getType())) {
- uint64_t ArraySize = BR.getContext().getTypeSize(Array) / 8;
- if (const auto *String = dyn_cast<StringLiteral>(Source)) {
- if (ArraySize >= String->getLength() + 1)
- return;
- }
+
+ if (const auto *Array = dyn_cast<ConstantArrayType>(Target->getType())) {
+ uint64_t ArraySize = BR.getContext().getTypeSize(Array) / 8;
+ if (const auto *String = dyn_cast<StringLiteral>(Source)) {
+ if (ArraySize >= String->getLength() + 1)
+ return;
}
+ }
// Issue a warning.
PathDiagnosticLocation CELoc =
diff --git a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
index e079a8cb12be..7688b713b06b 100644
--- a/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 7862a4c25681..44fac0278bdd 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -169,7 +169,7 @@ public:
/// This callback should be used by the checkers to aggressively clean
/// up/reduce the checker state, which is important for reducing the overall
/// memory usage. Specifically, if a checker keeps symbol specific information
- /// in the sate, it can and should be dropped after the symbol becomes dead.
+ /// in the state, it can and should be dropped after the symbol becomes dead.
/// In addition, reporting a bug as soon as the checker becomes dead leads to
/// more precise diagnostics. (For example, one should report that a malloced
/// variable is not freed right after it goes out of scope.)
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index b38992b0e030..673608db1a1d 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp b/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
deleted file mode 100644
index fb9e366c3de0..000000000000
--- a/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//===--- ClangCheckers.h - Provides builtin checkers ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
-#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
-
-// FIXME: This is only necessary as long as there are checker registration
-// functions that do additional work besides mgr.registerChecker<CLASS>().
-// The only checkers that currently do this are:
-// - NSAutoreleasePoolChecker
-// - NSErrorChecker
-// - ObjCAtSyncChecker
-// It's probably worth including this information in Checkers.td to minimize
-// boilerplate code.
-#include "ClangSACheckers.h"
-
-using namespace clang;
-using namespace ento;
-
-void ento::registerBuiltinCheckers(CheckerRegistry &registry) {
-#define GET_CHECKERS
-#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
- registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
-#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
-#undef GET_CHECKERS
-}
diff --git a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
deleted file mode 100644
index d6e96f27a75e..000000000000
--- a/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
+++ /dev/null
@@ -1,37 +0,0 @@
-//===--- ClangSACheckers.h - Registration functions for Checkers *- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Declares the registation functions for the checkers defined in
-// libclangStaticAnalyzerCheckers.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
-
-#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
-
-namespace clang {
-
-namespace ento {
-class CheckerManager;
-class CheckerRegistry;
-
-#define GET_CHECKERS
-#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
- void register##CLASS(CheckerManager &mgr);
-#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
-#undef CHECKER
-#undef GET_CHECKERS
-
-} // end ento namespace
-
-} // end clang namespace
-
-#endif
diff --git a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index ee517ed97770..89354b866004 100644
--- a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -13,7 +13,7 @@
///
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Analysis/CloneDetection.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -42,7 +42,7 @@ public:
void reportClones(BugReporter &BR, AnalysisManager &Mgr,
std::vector<CloneDetector::CloneGroup> &CloneGroups) const;
- /// Reports only suspicious clones to the user along with informaton
+ /// Reports only suspicious clones to the user along with information
/// that explain why they are suspicious.
void reportSuspiciousClones(
BugReporter &BR, AnalysisManager &Mgr,
@@ -63,18 +63,18 @@ void CloneChecker::checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
// At this point, every statement in the translation unit has been analyzed by
// the CloneDetector. The only thing left to do is to report the found clones.
- int MinComplexity = Mgr.getAnalyzerOptions().getOptionAsInteger(
+ int MinComplexity = Mgr.getAnalyzerOptions().getCheckerIntegerOption(
"MinimumCloneComplexity", 50, this);
assert(MinComplexity >= 0);
- bool ReportSuspiciousClones = Mgr.getAnalyzerOptions().getBooleanOption(
- "ReportSuspiciousClones", true, this);
+ bool ReportSuspiciousClones = Mgr.getAnalyzerOptions()
+ .getCheckerBooleanOption("ReportSuspiciousClones", true, this);
- bool ReportNormalClones = Mgr.getAnalyzerOptions().getBooleanOption(
+ bool ReportNormalClones = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
"ReportNormalClones", true, this);
- StringRef IgnoredFilesPattern = Mgr.getAnalyzerOptions().getOptionAsString(
- "IgnoredFilesPattern", "", this);
+ StringRef IgnoredFilesPattern = Mgr.getAnalyzerOptions()
+ .getCheckerStringOption("IgnoredFilesPattern", "", this);
// Let the CloneDetector create a list of clones from all the analyzed
// statements. We don't filter for matching variable patterns at this point
diff --git a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 17ec2c288777..a5c67c2a5b45 100644
--- a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -14,20 +14,25 @@
// of expressions. A warning is reported when:
// * a negative value is implicitly converted to an unsigned value in an
// assignment, comparison or multiplication.
-// * assignment / initialization when source value is greater than the max
-// value of target
+// * assignment / initialization when the source value is greater than the max
+// value of the target integer type
+// * assignment / initialization when the source integer is above the range
+// where the target floating point type can represent all integers
//
// Many compilers and tools have similar checks that are based on semantic
// analysis. Those checks are sound but have poor precision. ConversionChecker
// is an alternative to those checks.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/APFloat.h"
+
+#include <climits>
using namespace clang;
using namespace ento;
@@ -40,11 +45,9 @@ public:
private:
mutable std::unique_ptr<BuiltinBug> BT;
- // Is there loss of precision
bool isLossOfPrecision(const ImplicitCastExpr *Cast, QualType DestType,
CheckerContext &C) const;
- // Is there loss of sign
bool isLossOfSign(const ImplicitCastExpr *Cast, CheckerContext &C) const;
void reportBug(ExplodedNode *N, CheckerContext &C, const char Msg[]) const;
@@ -132,19 +135,51 @@ bool ConversionChecker::isLossOfPrecision(const ImplicitCastExpr *Cast,
QualType SubType = Cast->IgnoreParenImpCasts()->getType();
- if (!DestType->isIntegerType() || !SubType->isIntegerType())
+ if (!DestType->isRealType() || !SubType->isIntegerType())
return false;
- if (C.getASTContext().getIntWidth(DestType) >=
- C.getASTContext().getIntWidth(SubType))
+ const bool isFloat = DestType->isFloatingType();
+
+ const auto &AC = C.getASTContext();
+
+ // We will find the largest RepresentsUntilExp value such that the DestType
+ // can exactly represent all nonnegative integers below 2^RepresentsUntilExp.
+ unsigned RepresentsUntilExp;
+
+ if (isFloat) {
+ const llvm::fltSemantics &Sema = AC.getFloatTypeSemantics(DestType);
+ RepresentsUntilExp = llvm::APFloat::semanticsPrecision(Sema);
+ } else {
+ RepresentsUntilExp = AC.getIntWidth(DestType);
+ if (RepresentsUntilExp == 1) {
+ // This is just casting a number to bool, probably not a bug.
+ return false;
+ }
+ if (DestType->isSignedIntegerType())
+ RepresentsUntilExp--;
+ }
+
+ if (RepresentsUntilExp >= sizeof(unsigned long long) * CHAR_BIT) {
+ // Avoid overflow in our later calculations.
return false;
+ }
+
+ unsigned CorrectedSrcWidth = AC.getIntWidth(SubType);
+ if (SubType->isSignedIntegerType())
+ CorrectedSrcWidth--;
- unsigned W = C.getASTContext().getIntWidth(DestType);
- if (W == 1 || W >= 64U)
+ if (RepresentsUntilExp >= CorrectedSrcWidth) {
+ // Simple case: the destination can store all values of the source type.
return false;
+ }
- unsigned long long MaxVal = 1ULL << W;
+ unsigned long long MaxVal = 1ULL << RepresentsUntilExp;
+ if (isFloat) {
+ // If this is a floating point type, it can also represent MaxVal exactly.
+ MaxVal++;
+ }
return C.isGreaterOrEqual(Cast->getSubExpr(), MaxVal);
+ // TODO: maybe also check negative values with too large magnitude.
}
bool ConversionChecker::isLossOfSign(const ImplicitCastExpr *Cast,
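
A short sketch of the integer-to-floating-point case the extended isLossOfPrecision() above is intended to flag (illustrative only; it assumes a float with a 24-bit significand, which represents all integers up to 2^24 exactly):

    void f() {
      float fine = (1 << 24);     // not reported: exactly representable in a float
      float lossy = (1 << 24) + 2; // reported: 16777218 cannot be represented exactly
                                   // in a 24-bit significand, so precision is lost
      bool b = 300;                // not reported: width-1 destinations (bool) are skipped
    }
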
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index f7b5f61cfb8a..4e0f6d3bedfd 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
@@ -262,7 +262,7 @@ public:
currentBlock = block;
// Skip statements in macros.
- if (S->getLocStart().isMacroID())
+ if (S->getBeginLoc().isMacroID())
return;
// Only cover dead stores from regular assignments. ++/-- dead stores
@@ -329,9 +329,8 @@ public:
return;
if (const Expr *E = V->getInit()) {
- while (const ExprWithCleanups *exprClean =
- dyn_cast<ExprWithCleanups>(E))
- E = exprClean->getSubExpr();
+ while (const FullExpr *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr();
// Look through transitive assignments, e.g.:
// int x = y = 0;
diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 810a33ed404d..90b1111aff0f 100644
--- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Analysis/Analyses/Dominators.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CallGraph.h"
@@ -69,6 +69,25 @@ void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
}
//===----------------------------------------------------------------------===//
+// LiveStatementsDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LiveStatementsDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ if (LiveVariables *L = Mgr.getAnalysis<RelaxedLiveVariables>(D))
+ L->dumpStmtLiveness(Mgr.getSourceManager());
+ }
+};
+}
+
+void ento::registerLiveStatementsDumper(CheckerManager &mgr) {
+ mgr.registerChecker<LiveStatementsDumper>();
+}
+
+//===----------------------------------------------------------------------===//
// CFGViewer
//===----------------------------------------------------------------------===//
@@ -182,7 +201,9 @@ public:
llvm::errs() << "[config]\n";
for (unsigned I = 0, E = Keys.size(); I != E; ++I)
- llvm::errs() << Keys[I]->getKey() << " = " << Keys[I]->second << '\n';
+ llvm::errs() << Keys[I]->getKey() << " = "
+ << (Keys[I]->second.empty() ? "\"\"" : Keys[I]->second)
+ << '\n';
llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
}
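
The new LiveStatementsDumper mirrors LiveVariablesDumper and prints statement liveness via dumpStmtLiveness(). A hedged usage sketch, assuming the checker is registered in Checkers.td under the debug package as DumpLiveStmts (that .td change is not shown in this diff):

    clang -cc1 -analyze -analyzer-checker=debug.DumpLiveStmts test.c
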
diff --git a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
index d3489282ab62..adf5a8e77a74 100644
--- a/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
@@ -21,7 +21,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -47,7 +47,6 @@ class DeleteWithNonVirtualDtorChecker
ID.AddPointer(&X);
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -104,7 +103,7 @@ void DeleteWithNonVirtualDtorChecker::checkPreStmt(const CXXDeleteExpr *DE,
std::shared_ptr<PathDiagnosticPiece>
DeleteWithNonVirtualDtorChecker::DeleteBugVisitor::VisitNode(
- const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ const ExplodedNode *N, BugReporterContext &BRC,
BugReport &BR) {
// Stop traversal after the first conversion was found on a path.
if (Satisfied)
diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 152b937bb03f..d01a889d256a 100644
--- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -111,6 +111,12 @@ static bool suppressReport(const Expr *E) {
return E->getType().getQualifiers().hasAddressSpace();
}
+static bool isDeclRefExprToReference(const Expr *E) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl()->getType()->isReferenceType();
+ return false;
+}
+
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
CheckerContext &C) const {
// Generate an error node.
@@ -154,7 +160,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
}
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
- if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) {
+ if (M->isArrow() || isDeclRefExprToReference(M->getBase())) {
os << "Access to field '" << M->getMemberNameInfo()
<< "' results in a dereference of a null pointer";
AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
@@ -177,7 +183,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
auto report = llvm::make_unique<BugReport>(
*BT_null, buf.empty() ? BT_null->getDescription() : StringRef(buf), N);
- bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S), *report);
+ bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
for (SmallVectorImpl<SourceRange>::iterator
I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
@@ -197,8 +203,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
auto report =
llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S),
- *report);
+ bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
C.emitReport(std::move(report));
}
return;
diff --git a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index 5efb9096f2ff..2a559422df34 100644
--- a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -21,7 +21,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index bc39c92ea970..a220a0513e28 100644
--- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -32,6 +32,13 @@ public:
};
} // end anonymous namespace
+static const Expr *getDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const auto *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return nullptr;
+}
+
void DivZeroChecker::reportBug(
const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
std::unique_ptr<BugReporterVisitor> Visitor) const {
@@ -41,7 +48,7 @@ void DivZeroChecker::reportBug(
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
R->addVisitor(std::move(Visitor));
- bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
+ bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
C.emitReport(std::move(R));
}
}
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 4e4d81cd6714..803d7ae22a71 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -17,7 +17,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -49,7 +49,6 @@ class DynamicTypeChecker : public Checker<check::PostStmt<ImplicitCastExpr>> {
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -92,11 +91,10 @@ void DynamicTypeChecker::reportTypeError(QualType DynamicType,
std::shared_ptr<PathDiagnosticPiece>
DynamicTypeChecker::DynamicTypeBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
- BugReport &BR) {
+ BugReport &) {
ProgramStateRef State = N->getState();
- ProgramStateRef StatePrev = PrevN->getState();
+ ProgramStateRef StatePrev = N->getFirstPred()->getState();
DynamicTypeInfo TrackedType = getDynamicTypeInfo(State, Reg);
DynamicTypeInfo TrackedTypePrev = getDynamicTypeInfo(StatePrev, Reg);
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 126e57645a43..31d4eebe8968 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -21,7 +21,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
@@ -85,7 +85,6 @@ class DynamicTypePropagation:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -124,11 +123,6 @@ void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
}
}
- if (!SR.hasDeadSymbols()) {
- C.addTransition(State);
- return;
- }
-
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
@@ -937,11 +931,10 @@ void DynamicTypePropagation::reportGenericsBug(
std::shared_ptr<PathDiagnosticPiece>
DynamicTypePropagation::GenericsBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
ProgramStateRef state = N->getState();
- ProgramStateRef statePrev = PrevN->getState();
+ ProgramStateRef statePrev = N->getFirstPred()->getState();
const ObjCObjectPointerType *const *TrackedType =
state->get<MostSpecializedTypeArgsMap>(Sym);
diff --git a/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
new file mode 100644
index 000000000000..4e51cffaa744
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -0,0 +1,128 @@
+//===- EnumCastOutOfRangeChecker.cpp ---------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EnumCastOutOfRangeChecker is responsible for checking integer to
+// enumeration casts that could result in undefined values. This could happen
+// if the value that we cast from is out of the value range of the enumeration.
+// Reference:
+// [ISO/IEC 14882-2014] ISO/IEC 14882-2014.
+// Programming Languages — C++, Fourth Edition. 2014.
+// C++ Standard, [dcl.enum], in paragraph 8, which defines the range of an enum
+// C++ Standard, [expr.static.cast], paragraph 10, which defines the behaviour
+// of casting an integer value that is out of range
+// SEI CERT C++ Coding Standard, INT50-CPP. Do not cast to an out-of-range
+// enumeration value
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+// This evaluator checks two SVals for equality. The first SVal is provided via
+// the constructor, the second is the parameter of the overloaded () operator.
+// It uses the built-in ConstraintManager to resolve whether the equality is
+// possible in the given ProgramState.
+class ConstraintBasedEQEvaluator {
+ const DefinedOrUnknownSVal CompareValue;
+ const ProgramStateRef PS;
+ SValBuilder &SVB;
+
+public:
+ ConstraintBasedEQEvaluator(CheckerContext &C,
+ const DefinedOrUnknownSVal CompareValue)
+ : CompareValue(CompareValue), PS(C.getState()), SVB(C.getSValBuilder()) {}
+
+ bool operator()(const llvm::APSInt &EnumDeclInitValue) {
+ DefinedOrUnknownSVal EnumDeclValue = SVB.makeIntVal(EnumDeclInitValue);
+ DefinedOrUnknownSVal ElemEqualsValueToCast =
+ SVB.evalEQ(PS, EnumDeclValue, CompareValue);
+
+ return static_cast<bool>(PS->assume(ElemEqualsValueToCast, true));
+ }
+};
+
+// This checker checks CastExpr statements.
+// If the value provided to the cast is one of the values the enumeration can
+// represent, the said value matches the enumeration. If the checker can
+// establish the impossibility of matching it gives a warning.
+// Being conservative, it does not warn if there is slight possibility the
+// value can be matching.
+class EnumCastOutOfRangeChecker : public Checker<check::PreStmt<CastExpr>> {
+ mutable std::unique_ptr<BuiltinBug> EnumValueCastOutOfRange;
+ void reportWarning(CheckerContext &C) const;
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+
+using EnumValueVector = llvm::SmallVector<llvm::APSInt, 6>;
+
+// Collects all of the values an enum can represent (as APSInts).
+EnumValueVector getDeclValuesForEnum(const EnumDecl *ED) {
+ EnumValueVector DeclValues(
+ std::distance(ED->enumerator_begin(), ED->enumerator_end()));
+ llvm::transform(ED->enumerators(), DeclValues.begin(),
+ [](const EnumConstantDecl *D) { return D->getInitVal(); });
+ return DeclValues;
+}
+} // namespace
+
+void EnumCastOutOfRangeChecker::reportWarning(CheckerContext &C) const {
+ if (const ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ if (!EnumValueCastOutOfRange)
+ EnumValueCastOutOfRange.reset(
+ new BuiltinBug(this, "Enum cast out of range",
+ "The value provided to the cast expression is not in "
+ "the valid range of values for the enum"));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *EnumValueCastOutOfRange, EnumValueCastOutOfRange->getDescription(),
+ N));
+ }
+}
+
+void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ // Get the value of the expression to cast.
+ const llvm::Optional<DefinedOrUnknownSVal> ValueToCast =
+ C.getSVal(CE->getSubExpr()).getAs<DefinedOrUnknownSVal>();
+
+ // If the value cannot be reasoned about (not even a DefinedOrUnknownSVal),
+ // don't analyze further.
+ if (!ValueToCast)
+ return;
+
+ const QualType T = CE->getType();
+ // Check whether the cast type is an enum.
+ if (!T->isEnumeralType())
+ return;
+
+ // If the cast is an enum, get its declaration.
+ // If isEnumeralType() returned true, then the declaration must exist
+ // even if it is a stub declaration. It is up to the getDeclValuesForEnum()
+ // function to handle this.
+ const EnumDecl *ED = T->castAs<EnumType>()->getDecl();
+
+ EnumValueVector DeclValues = getDeclValuesForEnum(ED);
+ // Check if any of the enum values possibly match.
+ bool PossibleValueMatch = llvm::any_of(
+ DeclValues, ConstraintBasedEQEvaluator(C, *ValueToCast));
+
+ // If there is no value that can possibly match any of the enum values, then
+ // warn.
+ if (!PossibleValueMatch)
+ reportWarning(C);
+}
+
+void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<EnumCastOutOfRangeChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 8de653c10f7e..2553f54bbcac 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -43,6 +43,8 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
void analyzerPrintState(const CallExpr *CE, CheckerContext &C) const;
void analyzerGetExtent(const CallExpr *CE, CheckerContext &C) const;
void analyzerHashDump(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDenote(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerExpress(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
@@ -60,6 +62,7 @@ public:
}
REGISTER_SET_WITH_PROGRAMSTATE(MarkedSymbols, SymbolRef)
+REGISTER_MAP_WITH_PROGRAMSTATE(DenotedSymbols, SymbolRef, const StringLiteral *)
bool ExprInspectionChecker::evalCall(const CallExpr *CE,
CheckerContext &C) const {
@@ -82,6 +85,8 @@ bool ExprInspectionChecker::evalCall(const CallExpr *CE,
.Case("clang_analyzer_numTimesReached",
&ExprInspectionChecker::analyzerNumTimesReached)
.Case("clang_analyzer_hashDump", &ExprInspectionChecker::analyzerHashDump)
+ .Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
+ .Case("clang_analyzer_express", &ExprInspectionChecker::analyzerExpress)
.Default(nullptr);
if (!Handler)
@@ -264,6 +269,13 @@ void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
N = BugNode;
State = State->remove<MarkedSymbols>(Sym);
}
+
+ for (auto I : State->get<DenotedSymbols>()) {
+ SymbolRef Sym = I.first;
+ if (!SymReaper.isLive(Sym))
+ State = State->remove<DenotedSymbols>(Sym);
+ }
+
C.addTransition(State, N);
}
@@ -287,7 +299,7 @@ void ExprInspectionChecker::analyzerHashDump(const CallExpr *CE,
CheckerContext &C) const {
const LangOptions &Opts = C.getLangOpts();
const SourceManager &SM = C.getSourceManager();
- FullSourceLoc FL(CE->getArg(0)->getLocStart(), SM);
+ FullSourceLoc FL(CE->getArg(0)->getBeginLoc(), SM);
std::string HashContent =
GetIssueString(SM, FL, getCheckName().getName(), "Category",
C.getLocationContext()->getDecl(), Opts);
@@ -295,6 +307,105 @@ void ExprInspectionChecker::analyzerHashDump(const CallExpr *CE,
reportBug(HashContent, C);
}
+void ExprInspectionChecker::analyzerDenote(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() < 2) {
+ reportBug("clang_analyzer_denote() requires a symbol and a string literal",
+ C);
+ return;
+ }
+
+ SymbolRef Sym = C.getSVal(CE->getArg(0)).getAsSymbol();
+ if (!Sym) {
+ reportBug("Not a symbol", C);
+ return;
+ }
+
+ const auto *E = dyn_cast<StringLiteral>(CE->getArg(1)->IgnoreParenCasts());
+ if (!E) {
+ reportBug("Not a string literal", C);
+ return;
+ }
+
+  ProgramStateRef State = C.getState();
+
+  C.addTransition(State->set<DenotedSymbols>(Sym, E));
+}
+
+namespace {
+class SymbolExpressor
+ : public SymExprVisitor<SymbolExpressor, Optional<std::string>> {
+ ProgramStateRef State;
+
+public:
+ SymbolExpressor(ProgramStateRef State) : State(State) {}
+
+ Optional<std::string> lookup(const SymExpr *S) {
+ if (const StringLiteral *const *SLPtr = State->get<DenotedSymbols>(S)) {
+ const StringLiteral *SL = *SLPtr;
+ return std::string(SL->getBytes());
+ }
+ return None;
+ }
+
+ Optional<std::string> VisitSymExpr(const SymExpr *S) {
+ return lookup(S);
+ }
+
+ Optional<std::string> VisitSymIntExpr(const SymIntExpr *S) {
+ if (Optional<std::string> Str = lookup(S))
+ return Str;
+ if (Optional<std::string> Str = Visit(S->getLHS()))
+ return (*Str + " " + BinaryOperator::getOpcodeStr(S->getOpcode()) + " " +
+ std::to_string(S->getRHS().getLimitedValue()) +
+ (S->getRHS().isUnsigned() ? "U" : ""))
+ .str();
+ return None;
+ }
+
+ Optional<std::string> VisitSymSymExpr(const SymSymExpr *S) {
+ if (Optional<std::string> Str = lookup(S))
+ return Str;
+ if (Optional<std::string> Str1 = Visit(S->getLHS()))
+ if (Optional<std::string> Str2 = Visit(S->getRHS()))
+ return (*Str1 + " " + BinaryOperator::getOpcodeStr(S->getOpcode()) +
+ " " + *Str2).str();
+ return None;
+ }
+
+ Optional<std::string> VisitSymbolCast(const SymbolCast *S) {
+ if (Optional<std::string> Str = lookup(S))
+ return Str;
+ if (Optional<std::string> Str = Visit(S->getOperand()))
+ return (Twine("(") + S->getType().getAsString() + ")" + *Str).str();
+ return None;
+ }
+};
+} // namespace
+
+void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() == 0) {
+ reportBug("clang_analyzer_express() requires a symbol", C);
+ return;
+ }
+
+ SymbolRef Sym = C.getSVal(CE->getArg(0)).getAsSymbol();
+ if (!Sym) {
+ reportBug("Not a symbol", C);
+ return;
+ }
+
+ SymbolExpressor V(C.getState());
+ auto Str = V.Visit(Sym);
+ if (!Str) {
+ reportBug("Unable to express", C);
+ return;
+ }
+
+ reportBug(*Str, C);
+}
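+
+// Rough usage sketch for the two debug functions above (illustrative only;
+// the declarations are the usual test-style stubs for these analyzer
+// intrinsics, not something defined in this file):
+//
+//   void clang_analyzer_denote(int, const char *);
+//   void clang_analyzer_express(int);
+//
+//   void test(int x) {
+//     clang_analyzer_denote(x, "$x");
+//     clang_analyzer_express(x + 1); // reports "$x + 1"
+//   }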
+
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ExprInspectionChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 059203fca730..165a4e4490eb 100644
--- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index 5cb51b01f044..248b9c3f7693 100644
--- a/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -29,7 +29,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
diff --git a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index 3ef95e673b87..818716dd6070 100644
--- a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/LangOptions.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 899586745a0b..32fed202d3ab 100644
--- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -14,7 +14,7 @@
// aggressively, even if the involved symbols are under constrained.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -28,10 +28,13 @@ using namespace clang;
using namespace ento;
namespace {
-class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
- check::PreStmt<CallExpr> > {
+class GenericTaintChecker
+ : public Checker<check::PostStmt<CallExpr>, check::PreStmt<CallExpr>> {
public:
- static void *getTag() { static int Tag; return &Tag; }
+ static void *getTag() {
+ static int Tag;
+ return &Tag;
+ }
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -69,8 +72,8 @@ private:
static Optional<SVal> getPointedToSVal(CheckerContext &C, const Expr *Arg);
/// Functions defining the attack surface.
- typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
- CheckerContext &C) const;
+ typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(
+ const CallExpr *, CheckerContext &C) const;
ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
@@ -120,16 +123,15 @@ private:
TaintPropagationRule() {}
- TaintPropagationRule(unsigned SArg,
- unsigned DArg, bool TaintRet = false) {
+ TaintPropagationRule(unsigned SArg, unsigned DArg, bool TaintRet = false) {
SrcArgs.push_back(SArg);
DstArgs.push_back(DArg);
if (TaintRet)
DstArgs.push_back(ReturnValueIndex);
}
- TaintPropagationRule(unsigned SArg1, unsigned SArg2,
- unsigned DArg, bool TaintRet = false) {
+ TaintPropagationRule(unsigned SArg1, unsigned SArg2, unsigned DArg,
+ bool TaintRet = false) {
SrcArgs.push_back(SArg1);
SrcArgs.push_back(SArg2);
DstArgs.push_back(DArg);
@@ -139,18 +141,17 @@ private:
/// Get the propagation rule for a given function.
static TaintPropagationRule
- getTaintPropagationRule(const FunctionDecl *FDecl,
- StringRef Name,
- CheckerContext &C);
+ getTaintPropagationRule(const FunctionDecl *FDecl, StringRef Name,
+ CheckerContext &C);
inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
- inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
+ inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
inline bool isNull() const { return SrcArgs.empty(); }
inline bool isDestinationArgument(unsigned ArgNum) const {
- return (std::find(DstArgs.begin(),
- DstArgs.end(), ArgNum) != DstArgs.end());
+ return (std::find(DstArgs.begin(), DstArgs.end(), ArgNum) !=
+ DstArgs.end());
}
static inline bool isTaintedOrPointsToTainted(const Expr *E,
@@ -169,7 +170,6 @@ private:
/// Pre-process a function which propagates taint according to the
/// taint rule.
ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
-
};
};
@@ -177,17 +177,18 @@ const unsigned GenericTaintChecker::ReturnValueIndex;
const unsigned GenericTaintChecker::InvalidArgIndex;
const char GenericTaintChecker::MsgUncontrolledFormatString[] =
- "Untrusted data is used as a format string "
- "(CWE-134: Uncontrolled Format String)";
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
- "Untrusted data is passed to a system call "
- "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
const char GenericTaintChecker::MsgTaintedBufferSize[] =
- "Untrusted data is used to specify the buffer size "
- "(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
- "character data and the null terminator)";
+    "Untrusted data is used to specify the buffer size "
+    "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
+    "for character data and the null terminator)";
} // end of anonymous namespace
@@ -199,33 +200,32 @@ REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
GenericTaintChecker::TaintPropagationRule
GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
- const FunctionDecl *FDecl,
- StringRef Name,
- CheckerContext &C) {
+ const FunctionDecl *FDecl, StringRef Name, CheckerContext &C) {
// TODO: Currently, we might lose precision here: we always mark a return
// value as tainted even if it's just a pointer, pointing to tainted data.
// Check for exact name match for functions without builtin substitutes.
- TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
- .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
- .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
- .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
- .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
- .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
- .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
- .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
- .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
- .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
- .Case("read", TaintPropagationRule(0, 2, 1, true))
- .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
- .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
- .Case("fgets", TaintPropagationRule(2, 0, true))
- .Case("getline", TaintPropagationRule(2, 0))
- .Case("getdelim", TaintPropagationRule(3, 0))
- .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
- .Default(TaintPropagationRule());
+ TaintPropagationRule Rule =
+ llvm::StringSwitch<TaintPropagationRule>(Name)
+ .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("read", TaintPropagationRule(0, 2, 1, true))
+ .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
+ .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
+ .Case("fgets", TaintPropagationRule(2, 0, true))
+ .Case("getline", TaintPropagationRule(2, 0))
+ .Case("getdelim", TaintPropagationRule(3, 0))
+ .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+ .Default(TaintPropagationRule());
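+
+  // Illustrative reading of two of the rules above (not part of the patch):
+  //   .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+  //     -> if argument 0 is tainted, the return value becomes tainted;
+  //   .Case("fgets", TaintPropagationRule(2, 0, true))
+  //     -> if argument 2 (the stream) is tainted, taint the data argument 0
+  //        points to and also the return value.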
if (!Rule.isNull())
return Rule;
@@ -233,8 +233,8 @@ GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
// Check if it's one of the memory setting/copying functions.
  // This check is specialized but faster than calling isCLibraryFunction.
unsigned BId = 0;
- if ( (BId = FDecl->getMemoryFunctionKind()) )
- switch(BId) {
+ if ((BId = FDecl->getMemoryFunctionKind()))
+ switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
@@ -305,7 +305,7 @@ void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
// First, try generating a propagation rule for this function.
TaintPropagationRule Rule =
- TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
+ TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
if (!Rule.isNull()) {
State = Rule.process(CE, C);
if (!State)
@@ -316,15 +316,14 @@ void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
// Otherwise, check if we have custom pre-processing implemented.
FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Case("fscanf", &GenericTaintChecker::preFscanf)
- .Default(nullptr);
+ .Case("fscanf", &GenericTaintChecker::preFscanf)
+ .Default(nullptr);
// Check and evaluate the call.
if (evalFunction)
State = (this->*evalFunction)(CE, C);
if (!State)
return;
C.addTransition(State);
-
}
bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
@@ -338,9 +337,10 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
if (TaintArgs.isEmpty())
return false;
- for (llvm::ImmutableSet<unsigned>::iterator
- I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
- unsigned ArgNum = *I;
+ for (llvm::ImmutableSet<unsigned>::iterator I = TaintArgs.begin(),
+ E = TaintArgs.end();
+ I != E; ++I) {
+ unsigned ArgNum = *I;
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
@@ -352,7 +352,7 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
// tainted after the call.
if (CE->getNumArgs() < (ArgNum + 1))
return false;
- const Expr* Arg = CE->getArg(ArgNum);
+ const Expr *Arg = CE->getArg(ArgNum);
Optional<SVal> V = getPointedToSVal(C, Arg);
if (V)
State = State->addTaint(*V);
@@ -379,19 +379,20 @@ void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return;
- FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Case("scanf", &GenericTaintChecker::postScanf)
- // TODO: Add support for vfscanf & family.
- .Case("getchar", &GenericTaintChecker::postRetTaint)
- .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
- .Case("getenv", &GenericTaintChecker::postRetTaint)
- .Case("fopen", &GenericTaintChecker::postRetTaint)
- .Case("fdopen", &GenericTaintChecker::postRetTaint)
- .Case("freopen", &GenericTaintChecker::postRetTaint)
- .Case("getch", &GenericTaintChecker::postRetTaint)
- .Case("wgetch", &GenericTaintChecker::postRetTaint)
- .Case("socket", &GenericTaintChecker::postSocket)
- .Default(nullptr);
+ FnCheck evalFunction =
+ llvm::StringSwitch<FnCheck>(Name)
+ .Case("scanf", &GenericTaintChecker::postScanf)
+ // TODO: Add support for vfscanf & family.
+ .Case("getchar", &GenericTaintChecker::postRetTaint)
+ .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
+ .Case("getenv", &GenericTaintChecker::postRetTaint)
+ .Case("fopen", &GenericTaintChecker::postRetTaint)
+ .Case("fdopen", &GenericTaintChecker::postRetTaint)
+ .Case("freopen", &GenericTaintChecker::postRetTaint)
+ .Case("getch", &GenericTaintChecker::postRetTaint)
+ .Case("wgetch", &GenericTaintChecker::postRetTaint)
+ .Case("socket", &GenericTaintChecker::postSocket)
+ .Default(nullptr);
// If the callee isn't defined, it is not of security concern.
// Check and evaluate the call.
@@ -404,7 +405,8 @@ void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
C.addTransition(State);
}
-bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const{
+bool GenericTaintChecker::checkPre(const CallExpr *CE,
+ CheckerContext &C) const {
if (checkUncontrolledFormatString(CE, C))
return true;
@@ -458,8 +460,8 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
// Check for taint in arguments.
bool IsTainted = false;
- for (ArgVector::const_iterator I = SrcArgs.begin(),
- E = SrcArgs.end(); I != E; ++I) {
+ for (ArgVector::const_iterator I = SrcArgs.begin(), E = SrcArgs.end(); I != E;
+ ++I) {
unsigned ArgNum = *I;
if (ArgNum == InvalidArgIndex) {
@@ -483,8 +485,8 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
return State;
// Mark the arguments which should be tainted after the function returns.
- for (ArgVector::const_iterator I = DstArgs.begin(),
- E = DstArgs.end(); I != E; ++I) {
+ for (ArgVector::const_iterator I = DstArgs.begin(), E = DstArgs.end(); I != E;
+ ++I) {
unsigned ArgNum = *I;
// Should we mark all arguments as tainted?
@@ -498,8 +500,8 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
// Process pointer argument.
const Type *ArgTy = Arg->getType().getTypePtr();
QualType PType = ArgTy->getPointeeType();
- if ((!PType.isNull() && !PType.isConstQualified())
- || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ if ((!PType.isNull() && !PType.isConstQualified()) ||
+ (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
State = State->add<TaintArgsOnPostVisit>(i);
}
continue;
@@ -519,11 +521,10 @@ GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
return State;
}
-
// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
// and arg 1 should get taint.
ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
- CheckerContext &C) const {
+ CheckerContext &C) const {
assert(CE->getNumArgs() >= 2);
ProgramStateRef State = C.getState();
@@ -532,14 +533,13 @@ ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
isStdin(CE->getArg(0), C)) {
// All arguments except for the first two should get taint.
for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
- State = State->add<TaintArgsOnPostVisit>(i);
+ State = State->add<TaintArgsOnPostVisit>(i);
return State;
}
return nullptr;
}
-
 // If argument 0 (protocol domain) is network, the return value should get taint.
ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
CheckerContext &C) const {
@@ -558,7 +558,7 @@ ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
}
ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
- CheckerContext &C) const {
+ CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (CE->getNumArgs() < 2)
return State;
@@ -567,7 +567,7 @@ ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- const Expr* Arg = CE->getArg(i);
+ const Expr *Arg = CE->getArg(i);
Optional<SVal> V = getPointedToSVal(C, Arg);
if (V)
State = State->addTaint(*V);
@@ -593,7 +593,8 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
return false;
  // Get its symbol and find the declaration region it's pointing to.
- const SymbolRegionValue *Sm =dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ const SymbolRegionValue *Sm =
+ dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
if (!Sm)
return false;
const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
@@ -605,11 +606,11 @@ bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
D = D->getCanonicalDecl();
if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
- if (const PointerType * PtrTy =
+ if (const PointerType *PtrTy =
dyn_cast<PointerType>(D->getType().getTypePtr()))
- if (PtrTy->getPointeeType().getCanonicalType() ==
- C.getASTContext().getFILEType().getCanonicalType())
- return true;
+ if (PtrTy->getPointeeType().getCanonicalType() ==
+ C.getASTContext().getFILEType().getCanonicalType())
+ return true;
}
return false;
}
@@ -625,8 +626,7 @@ static bool getPrintfFormatArgumentNum(const CallExpr *CE,
return false;
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
- if ((Format->getType()->getName() == "printf") &&
- CE->getNumArgs() > ArgNum)
+ if ((Format->getType()->getName() == "printf") && CE->getNumArgs() > ArgNum)
return true;
}
@@ -667,36 +667,36 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
return false;
}
-bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
- CheckerContext &C) const{
+bool GenericTaintChecker::checkUncontrolledFormatString(
+ const CallExpr *CE, CheckerContext &C) const {
// Check if the function contains a format string argument.
unsigned int ArgNum = 0;
if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
return false;
- // If either the format string content or the pointer itself are tainted, warn.
+ // If either the format string content or the pointer itself are tainted,
+ // warn.
return generateReportIfTainted(CE->getArg(ArgNum),
MsgUncontrolledFormatString, C);
}
-bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
- StringRef Name,
+bool GenericTaintChecker::checkSystemCall(const CallExpr *CE, StringRef Name,
CheckerContext &C) const {
// TODO: It might make sense to run this check on demand. In some cases,
// we should check if the environment has been cleansed here. We also might
  // need to know if the user was reset before these calls (seteuid).
unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
- .Case("system", 0)
- .Case("popen", 0)
- .Case("execl", 0)
- .Case("execle", 0)
- .Case("execlp", 0)
- .Case("execv", 0)
- .Case("execvp", 0)
- .Case("execvP", 0)
- .Case("execve", 0)
- .Case("dlopen", 0)
- .Default(UINT_MAX);
+ .Case("system", 0)
+ .Case("popen", 0)
+ .Case("execl", 0)
+ .Case("execle", 0)
+ .Case("execlp", 0)
+ .Case("execv", 0)
+ .Case("execvp", 0)
+ .Case("execvP", 0)
+ .Case("execve", 0)
+ .Case("dlopen", 0)
+ .Default(UINT_MAX);
if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
return false;
@@ -712,8 +712,8 @@ bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
// If the function has a buffer size argument, set ArgNum.
unsigned ArgNum = InvalidArgIndex;
unsigned BId = 0;
- if ( (BId = FDecl->getMemoryFunctionKind()) )
- switch(BId) {
+ if ((BId = FDecl->getMemoryFunctionKind()))
+ switch (BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
diff --git a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
index f102ca96a5c1..4c2a229428d9 100644
--- a/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp
@@ -16,7 +16,7 @@
///
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index 29677f737f5c..a4f47d727a8f 100644
--- a/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -14,7 +14,8 @@
//===----------------------------------------------------------------------===//
#include "AllocationState.h"
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "InterCheckerAPI.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -24,23 +25,10 @@
using namespace clang;
using namespace ento;
-using PtrSet = llvm::ImmutableSet<SymbolRef>;
-
// Associate container objects with a set of raw pointer symbols.
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(PtrSet, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(RawPtrMap, const MemRegion *, PtrSet)
-// This is a trick to gain access to PtrSet's Factory.
-namespace clang {
-namespace ento {
-template <>
-struct ProgramStateTrait<PtrSet> : public ProgramStatePartialTrait<PtrSet> {
- static void *GDMIndex() {
- static int Index = 0;
- return &Index;
- }
-};
-} // end namespace ento
-} // end namespace clang
namespace {
@@ -67,8 +55,7 @@ public:
ID.AddPointer(getTag());
}
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
+ virtual std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -85,14 +72,20 @@ public:
};
InnerPointerChecker()
- : AppendFn("append"), AssignFn("assign"), ClearFn("clear"),
- CStrFn("c_str"), DataFn("data"), EraseFn("erase"), InsertFn("insert"),
- PopBackFn("pop_back"), PushBackFn("push_back"), ReplaceFn("replace"),
- ReserveFn("reserve"), ResizeFn("resize"),
- ShrinkToFitFn("shrink_to_fit"), SwapFn("swap") {}
-
- /// Check if the object of this member function call is a `basic_string`.
- bool isCalledOnStringObject(const CXXInstanceCall *ICall) const;
+ : AppendFn({"std", "basic_string", "append"}),
+ AssignFn({"std", "basic_string", "assign"}),
+ ClearFn({"std", "basic_string", "clear"}),
+ CStrFn({"std", "basic_string", "c_str"}),
+ DataFn({"std", "basic_string", "data"}),
+ EraseFn({"std", "basic_string", "erase"}),
+ InsertFn({"std", "basic_string", "insert"}),
+ PopBackFn({"std", "basic_string", "pop_back"}),
+ PushBackFn({"std", "basic_string", "push_back"}),
+ ReplaceFn({"std", "basic_string", "replace"}),
+ ReserveFn({"std", "basic_string", "reserve"}),
+ ResizeFn({"std", "basic_string", "resize"}),
+ ShrinkToFitFn({"std", "basic_string", "shrink_to_fit"}),
+ SwapFn({"std", "basic_string", "swap"}) {}
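+
+  // Rough sketch of the pattern being tracked (illustrative only, not part
+  // of this patch; consume() is a made-up function):
+  //   std::string S = "foo";
+  //   const char *P = S.c_str(); // P is recorded in RawPtrMap for S's region
+  //   S.append("bar");           // invalidating call: P's symbol is marked
+  //                              // released via the allocation_state API
+  //   consume(P);                // the use is then diagnosed through the
+  //                              // MallocChecker machinery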
/// Check whether the called member function potentially invalidates
/// pointers referring to the container object's inner buffer.
@@ -121,21 +114,6 @@ public:
} // end anonymous namespace
-bool InnerPointerChecker::isCalledOnStringObject(
- const CXXInstanceCall *ICall) const {
- const auto *ObjRegion =
- dyn_cast_or_null<TypedValueRegion>(ICall->getCXXThisVal().getAsRegion());
- if (!ObjRegion)
- return false;
-
- QualType ObjTy = ObjRegion->getValueType();
- if (ObjTy.isNull() ||
- ObjTy->getAsCXXRecordDecl()->getName() != "basic_string")
- return false;
-
- return true;
-}
-
bool InnerPointerChecker::isInvalidatingMemberFunction(
const CallEvent &Call) const {
if (const auto *MemOpCall = dyn_cast<CXXMemberOperatorCall>(&Call)) {
@@ -219,33 +197,34 @@ void InnerPointerChecker::checkPostCall(const CallEvent &Call,
ProgramStateRef State = C.getState();
if (const auto *ICall = dyn_cast<CXXInstanceCall>(&Call)) {
- if (isCalledOnStringObject(ICall)) {
- const auto *ObjRegion = dyn_cast_or_null<TypedValueRegion>(
- ICall->getCXXThisVal().getAsRegion());
-
- if (Call.isCalled(CStrFn) || Call.isCalled(DataFn)) {
- SVal RawPtr = Call.getReturnValue();
- if (SymbolRef Sym = RawPtr.getAsSymbol(/*IncludeBaseRegions=*/true)) {
- // Start tracking this raw pointer by adding it to the set of symbols
- // associated with this container object in the program state map.
-
- PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
- const PtrSet *SetPtr = State->get<RawPtrMap>(ObjRegion);
- PtrSet Set = SetPtr ? *SetPtr : F.getEmptySet();
- assert(C.wasInlined || !Set.contains(Sym));
- Set = F.add(Set, Sym);
-
- State = State->set<RawPtrMap>(ObjRegion, Set);
- C.addTransition(State);
- }
- return;
- }
+ // TODO: Do we need these to be typed?
+ const auto *ObjRegion = dyn_cast_or_null<TypedValueRegion>(
+ ICall->getCXXThisVal().getAsRegion());
+ if (!ObjRegion)
+ return;
+
+ if (Call.isCalled(CStrFn) || Call.isCalled(DataFn)) {
+ SVal RawPtr = Call.getReturnValue();
+ if (SymbolRef Sym = RawPtr.getAsSymbol(/*IncludeBaseRegions=*/true)) {
+ // Start tracking this raw pointer by adding it to the set of symbols
+ // associated with this container object in the program state map.
- // Check [string.require] / second point.
- if (isInvalidatingMemberFunction(Call)) {
- markPtrSymbolsReleased(Call, State, ObjRegion, C);
- return;
+ PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
+ const PtrSet *SetPtr = State->get<RawPtrMap>(ObjRegion);
+ PtrSet Set = SetPtr ? *SetPtr : F.getEmptySet();
+ assert(C.wasInlined || !Set.contains(Sym));
+ Set = F.add(Set, Sym);
+
+ State = State->set<RawPtrMap>(ObjRegion, Set);
+ C.addTransition(State);
}
+ return;
+ }
+
+ // Check [string.require] / second point.
+ if (isInvalidatingMemberFunction(Call)) {
+ markPtrSymbolsReleased(Call, State, ObjRegion, C);
+ return;
}
}
@@ -278,41 +257,56 @@ void InnerPointerChecker::checkDeadSymbols(SymbolReaper &SymReaper,
C.addTransition(State);
}
+namespace clang {
+namespace ento {
+namespace allocation_state {
+
+std::unique_ptr<BugReporterVisitor> getInnerPointerBRVisitor(SymbolRef Sym) {
+ return llvm::make_unique<InnerPointerChecker::InnerPointerBRVisitor>(Sym);
+}
+
+const MemRegion *getContainerObjRegion(ProgramStateRef State, SymbolRef Sym) {
+ RawPtrMapTy Map = State->get<RawPtrMap>();
+ for (const auto Entry : Map) {
+ if (Entry.second.contains(Sym)) {
+ return Entry.first;
+ }
+ }
+ return nullptr;
+}
+
+} // end namespace allocation_state
+} // end namespace ento
+} // end namespace clang
+
std::shared_ptr<PathDiagnosticPiece>
InnerPointerChecker::InnerPointerBRVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
- BugReport &BR) {
+ BugReport &) {
if (!isSymbolTracked(N->getState(), PtrToBuf) ||
- isSymbolTracked(PrevN->getState(), PtrToBuf))
+ isSymbolTracked(N->getFirstPred()->getState(), PtrToBuf))
return nullptr;
const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
return nullptr;
+ const MemRegion *ObjRegion =
+ allocation_state::getContainerObjRegion(N->getState(), PtrToBuf);
+ const auto *TypedRegion = cast<TypedValueRegion>(ObjRegion);
+ QualType ObjTy = TypedRegion->getValueType();
+
SmallString<256> Buf;
llvm::raw_svector_ostream OS(Buf);
- OS << "Dangling inner pointer obtained here";
+ OS << "Pointer to inner buffer of '" << ObjTy.getAsString()
+ << "' obtained here";
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true,
nullptr);
}
-namespace clang {
-namespace ento {
-namespace allocation_state {
-
-std::unique_ptr<BugReporterVisitor> getInnerPointerBRVisitor(SymbolRef Sym) {
- return llvm::make_unique<InnerPointerChecker::InnerPointerBRVisitor>(Sym);
-}
-
-} // end namespace allocation_state
-} // end namespace ento
-} // end namespace clang
-
void ento::registerInnerPointerChecker(CheckerManager &Mgr) {
- registerNewDeleteChecker(Mgr);
+ registerInnerPointerCheckerAux(Mgr);
Mgr.registerChecker<InnerPointerChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
index d38d63cd05ce..81c95a4813a6 100644
--- a/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
+++ b/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -20,5 +20,8 @@ namespace ento {
/// Register the checker which evaluates CString API calls.
void registerCStringCheckerBasic(CheckerManager &Mgr);
+/// Register the part of MallocChecker connected to InnerPointerChecker.
+void registerInnerPointerCheckerAux(CheckerManager &Mgr);
+
}}
#endif /* INTERCHECKERAPI_H_ */
diff --git a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
index 520c32e1c770..e719e19d68e9 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorChecker.cpp
@@ -66,11 +66,15 @@
// making an assumption e.g. `S1 + n == S2 + m` we store `S1 - S2 == m - n` as
// a constraint which we later retrieve when doing an actual comparison.
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+
+#include <utility>
using namespace clang;
using namespace ento;
@@ -85,34 +89,47 @@ private:
// Container the iterator belongs to
const MemRegion *Cont;
+ // Whether iterator is valid
+ const bool Valid;
+
// Abstract offset
const SymbolRef Offset;
- IteratorPosition(const MemRegion *C, SymbolRef Of)
- : Cont(C), Offset(Of) {}
+ IteratorPosition(const MemRegion *C, bool V, SymbolRef Of)
+ : Cont(C), Valid(V), Offset(Of) {}
public:
const MemRegion *getContainer() const { return Cont; }
+ bool isValid() const { return Valid; }
SymbolRef getOffset() const { return Offset; }
+ IteratorPosition invalidate() const {
+ return IteratorPosition(Cont, false, Offset);
+ }
+
static IteratorPosition getPosition(const MemRegion *C, SymbolRef Of) {
- return IteratorPosition(C, Of);
+ return IteratorPosition(C, true, Of);
}
IteratorPosition setTo(SymbolRef NewOf) const {
- return IteratorPosition(Cont, NewOf);
+ return IteratorPosition(Cont, Valid, NewOf);
+ }
+
+ IteratorPosition reAssign(const MemRegion *NewCont) const {
+ return IteratorPosition(NewCont, Valid, Offset);
}
bool operator==(const IteratorPosition &X) const {
- return Cont == X.Cont && Offset == X.Offset;
+ return Cont == X.Cont && Valid == X.Valid && Offset == X.Offset;
}
bool operator!=(const IteratorPosition &X) const {
- return Cont != X.Cont || Offset != X.Offset;
+ return Cont != X.Cont || Valid != X.Valid || Offset != X.Offset;
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(Cont);
+ ID.AddInteger(Valid);
ID.Add(Offset);
}
};
@@ -181,15 +198,17 @@ public:
class IteratorChecker
: public Checker<check::PreCall, check::PostCall,
- check::PreStmt<CXXOperatorCallExpr>,
- check::PostStmt<MaterializeTemporaryExpr>,
+ check::PostStmt<MaterializeTemporaryExpr>, check::Bind,
check::LiveSymbols, check::DeadSymbols,
eval::Assume> {
std::unique_ptr<BugType> OutOfRangeBugType;
+ std::unique_ptr<BugType> MismatchedBugType;
+ std::unique_ptr<BugType> InvalidatedBugType;
void handleComparison(CheckerContext &C, const SVal &RetVal, const SVal &LVal,
const SVal &RVal, OverloadedOperatorKind Op) const;
+ void verifyAccess(CheckerContext &C, const SVal &Val) const;
void verifyDereference(CheckerContext &C, const SVal &Val) const;
void handleIncrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
bool Postfix) const;
@@ -204,17 +223,50 @@ class IteratorChecker
const SVal &Cont) const;
void assignToContainer(CheckerContext &C, const Expr *CE, const SVal &RetVal,
const MemRegion *Cont) const;
+ void handleAssign(CheckerContext &C, const SVal &Cont,
+ const Expr *CE = nullptr,
+ const SVal &OldCont = UndefinedVal()) const;
+ void handleClear(CheckerContext &C, const SVal &Cont) const;
+ void handlePushBack(CheckerContext &C, const SVal &Cont) const;
+ void handlePopBack(CheckerContext &C, const SVal &Cont) const;
+ void handlePushFront(CheckerContext &C, const SVal &Cont) const;
+ void handlePopFront(CheckerContext &C, const SVal &Cont) const;
+ void handleInsert(CheckerContext &C, const SVal &Iter) const;
+ void handleErase(CheckerContext &C, const SVal &Iter) const;
+ void handleErase(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const;
+ void handleEraseAfter(CheckerContext &C, const SVal &Iter) const;
+ void handleEraseAfter(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const;
+ void verifyIncrement(CheckerContext &C, const SVal &Iter) const;
+ void verifyDecrement(CheckerContext &C, const SVal &Iter) const;
void verifyRandomIncrOrDecr(CheckerContext &C, OverloadedOperatorKind Op,
- const SVal &RetVal, const SVal &LHS,
- const SVal &RHS) const;
+ const SVal &LHS, const SVal &RHS) const;
+ void verifyMatch(CheckerContext &C, const SVal &Iter,
+ const MemRegion *Cont) const;
+ void verifyMatch(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const;
+ IteratorPosition advancePosition(CheckerContext &C, OverloadedOperatorKind Op,
+ const IteratorPosition &Pos,
+ const SVal &Distance) const;
void reportOutOfRangeBug(const StringRef &Message, const SVal &Val,
CheckerContext &C, ExplodedNode *ErrNode) const;
+ void reportMismatchedBug(const StringRef &Message, const SVal &Val1,
+ const SVal &Val2, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+ void reportMismatchedBug(const StringRef &Message, const SVal &Val,
+ const MemRegion *Reg, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+ void reportInvalidatedBug(const StringRef &Message, const SVal &Val,
+ CheckerContext &C, ExplodedNode *ErrNode) const;
public:
IteratorChecker();
enum CheckKind {
CK_IteratorRangeChecker,
+ CK_MismatchedIteratorChecker,
+ CK_InvalidatedIteratorChecker,
CK_NumCheckKinds
};
@@ -223,7 +275,9 @@ public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPreStmt(const CXXOperatorCallExpr *COCE, CheckerContext &C) const;
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const CXXConstructExpr *CCE, CheckerContext &C) const;
+ void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
@@ -246,13 +300,31 @@ namespace {
bool isIteratorType(const QualType &Type);
bool isIterator(const CXXRecordDecl *CRD);
+bool isComparisonOperator(OverloadedOperatorKind OK);
bool isBeginCall(const FunctionDecl *Func);
bool isEndCall(const FunctionDecl *Func);
+bool isAssignCall(const FunctionDecl *Func);
+bool isClearCall(const FunctionDecl *Func);
+bool isPushBackCall(const FunctionDecl *Func);
+bool isEmplaceBackCall(const FunctionDecl *Func);
+bool isPopBackCall(const FunctionDecl *Func);
+bool isPushFrontCall(const FunctionDecl *Func);
+bool isEmplaceFrontCall(const FunctionDecl *Func);
+bool isPopFrontCall(const FunctionDecl *Func);
+bool isInsertCall(const FunctionDecl *Func);
+bool isEraseCall(const FunctionDecl *Func);
+bool isEraseAfterCall(const FunctionDecl *Func);
+bool isEmplaceCall(const FunctionDecl *Func);
+bool isAssignmentOperator(OverloadedOperatorKind OK);
bool isSimpleComparisonOperator(OverloadedOperatorKind OK);
+bool isAccessOperator(OverloadedOperatorKind OK);
bool isDereferenceOperator(OverloadedOperatorKind OK);
bool isIncrementOperator(OverloadedOperatorKind OK);
bool isDecrementOperator(OverloadedOperatorKind OK);
bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg);
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg);
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg);
BinaryOperator::Opcode getOpcode(const SymExpr *SE);
const RegionOrSymbol getRegionOrSymbol(const SVal &Val);
const ProgramStateRef processComparison(ProgramStateRef State,
@@ -287,12 +359,41 @@ ProgramStateRef relateIteratorPositions(ProgramStateRef State,
const IteratorPosition &Pos1,
const IteratorPosition &Pos2,
bool Equal);
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont);
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2);
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont);
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc);
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc);
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont);
ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
const ContainerData &CData);
bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont);
-bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos);
+bool isBoundThroughLazyCompoundVal(const Environment &Env,
+ const MemRegion *Reg);
+bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
+bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos);
+bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
bool isZero(ProgramStateRef State, const NonLoc &Val);
} // namespace
@@ -300,39 +401,198 @@ IteratorChecker::IteratorChecker() {
OutOfRangeBugType.reset(
new BugType(this, "Iterator out of range", "Misuse of STL APIs"));
OutOfRangeBugType->setSuppressOnSink(true);
+ MismatchedBugType.reset(
+ new BugType(this, "Iterator(s) mismatched", "Misuse of STL APIs"));
+ MismatchedBugType->setSuppressOnSink(true);
+ InvalidatedBugType.reset(
+ new BugType(this, "Iterator invalidated", "Misuse of STL APIs"));
+ InvalidatedBugType->setSuppressOnSink(true);
}
void IteratorChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- // Check for out of range access
+  // Check for out-of-range access, access of an invalidated position, and
+  // iterator mismatches
const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!Func)
return;
if (Func->isOverloadedOperator()) {
- if (ChecksEnabled[CK_IteratorRangeChecker] &&
- isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
+ if (ChecksEnabled[CK_InvalidatedIteratorChecker] &&
+ isAccessOperator(Func->getOverloadedOperator())) {
+ // Check for any kind of access of invalidated iterator positions
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- // Check for out-of-range incrementions and decrementions
- if (Call.getNumArgs() >= 1) {
- verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
- Call.getReturnValue(),
- InstCall->getCXXThisVal(), Call.getArgSVal(0));
- }
+ verifyAccess(C, InstCall->getCXXThisVal());
} else {
- if (Call.getNumArgs() >= 2) {
- verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
- Call.getReturnValue(), Call.getArgSVal(0),
- Call.getArgSVal(1));
+ verifyAccess(C, Call.getArgSVal(0));
+ }
+ }
+ if (ChecksEnabled[CK_IteratorRangeChecker]) {
+ if (isIncrementOperator(Func->getOverloadedOperator())) {
+        // Check for out-of-range increments
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyIncrement(C, InstCall->getCXXThisVal());
+ } else {
+ if (Call.getNumArgs() >= 1) {
+ verifyIncrement(C, Call.getArgSVal(0));
+ }
+ }
+ } else if (isDecrementOperator(Func->getOverloadedOperator())) {
+        // Check for out-of-range decrements
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyDecrement(C, InstCall->getCXXThisVal());
+ } else {
+ if (Call.getNumArgs() >= 1) {
+ verifyDecrement(C, Call.getArgSVal(0));
+ }
+ }
+ } else if (isRandomIncrOrDecrOperator(Func->getOverloadedOperator())) {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+          // Check for out-of-range increments and decrements
+ if (Call.getNumArgs() >= 1) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ InstCall->getCXXThisVal(),
+ Call.getArgSVal(0));
+ }
+ } else {
+ if (Call.getNumArgs() >= 2) {
+ verifyRandomIncrOrDecr(C, Func->getOverloadedOperator(),
+ Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ } else if (isDereferenceOperator(Func->getOverloadedOperator())) {
+ // Check for dereference of out-of-range iterators
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ verifyDereference(C, InstCall->getCXXThisVal());
+ } else {
+ verifyDereference(C, Call.getArgSVal(0));
}
}
- } else if (ChecksEnabled[CK_IteratorRangeChecker] &&
- isDereferenceOperator(Func->getOverloadedOperator())) {
- // Check for dereference of out-of-range iterators
+ } else if (ChecksEnabled[CK_MismatchedIteratorChecker] &&
+ isComparisonOperator(Func->getOverloadedOperator())) {
+ // Check for comparisons of iterators of different containers
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
- verifyDereference(C, InstCall->getCXXThisVal());
+ if (Call.getNumArgs() < 1)
+ return;
+
+ if (!isIteratorType(InstCall->getCXXThisExpr()->getType()) ||
+ !isIteratorType(Call.getArgExpr(0)->getType()))
+ return;
+
+ verifyMatch(C, InstCall->getCXXThisVal(), Call.getArgSVal(0));
} else {
- verifyDereference(C, Call.getArgSVal(0));
+ if (Call.getNumArgs() < 2)
+ return;
+
+ if (!isIteratorType(Call.getArgExpr(0)->getType()) ||
+ !isIteratorType(Call.getArgExpr(1)->getType()))
+ return;
+
+ verifyMatch(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ } else if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (!ChecksEnabled[CK_MismatchedIteratorChecker])
+ return;
+
+ const auto *ContReg = InstCall->getCXXThisVal().getAsRegion();
+ if (!ContReg)
+ return;
+ // Check for erase, insert and emplace using iterator of another container
+ if (isEraseCall(Func) || isEraseAfterCall(Func)) {
+ verifyMatch(C, Call.getArgSVal(0),
+ InstCall->getCXXThisVal().getAsRegion());
+ if (Call.getNumArgs() == 2) {
+ verifyMatch(C, Call.getArgSVal(1),
+ InstCall->getCXXThisVal().getAsRegion());
+ }
+ } else if (isInsertCall(Func)) {
+ verifyMatch(C, Call.getArgSVal(0),
+ InstCall->getCXXThisVal().getAsRegion());
+ if (Call.getNumArgs() == 3 &&
+ isIteratorType(Call.getArgExpr(1)->getType()) &&
+ isIteratorType(Call.getArgExpr(2)->getType())) {
+ verifyMatch(C, Call.getArgSVal(1), Call.getArgSVal(2));
+ }
+ } else if (isEmplaceCall(Func)) {
+ verifyMatch(C, Call.getArgSVal(0),
+ InstCall->getCXXThisVal().getAsRegion());
+ }
+ } else if (isa<CXXConstructorCall>(&Call)) {
+ // Check match of first-last iterator pair in a constructor of a container
+ if (Call.getNumArgs() < 2)
+ return;
+
+ const auto *Ctr = cast<CXXConstructorDecl>(Call.getDecl());
+ if (Ctr->getNumParams() < 2)
+ return;
+
+ if (Ctr->getParamDecl(0)->getName() != "first" ||
+ Ctr->getParamDecl(1)->getName() != "last")
+ return;
+
+ if (!isIteratorType(Call.getArgExpr(0)->getType()) ||
+ !isIteratorType(Call.getArgExpr(1)->getType()))
+ return;
+
+ verifyMatch(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ } else {
+ // The main purpose of iterators is to abstract away from different
+    // containers and provide (possibly limited) uniform access to them.
+ // This implies that any correctly written template function that
+ // works on multiple containers using iterators takes different
+ // template parameters for different containers. So we can safely
+ // assume that passing iterators of different containers as arguments
+ // whose type replaces the same template parameter is a bug.
+ //
+ // Example:
+ // template<typename I1, typename I2>
+ // void f(I1 first1, I1 last1, I2 first2, I2 last2);
+ //
+    // In this case the first two arguments to f() must be iterators belonging
+    // to the same container; the last two must also belong to the same
+    // container, but not necessarily to the same one as the first two.
+
+ if (!ChecksEnabled[CK_MismatchedIteratorChecker])
+ return;
+
+ const auto *Templ = Func->getPrimaryTemplate();
+ if (!Templ)
+ return;
+
+ const auto *TParams = Templ->getTemplateParameters();
+ const auto *TArgs = Func->getTemplateSpecializationArgs();
+
+ // Iterate over all the template parameters
+ for (size_t I = 0; I < TParams->size(); ++I) {
+ const auto *TPDecl = dyn_cast<TemplateTypeParmDecl>(TParams->getParam(I));
+ if (!TPDecl)
+ continue;
+
+ if (TPDecl->isParameterPack())
+ continue;
+
+ const auto TAType = TArgs->get(I).getAsType();
+ if (!isIteratorType(TAType))
+ continue;
+
+ SVal LHS = UndefinedVal();
+
+      // For every template parameter that is an iterator type in this
+      // instantiation, find all of the function's parameters whose type was
+      // substituted from it and check whether they belong to the same
+      // container.
+ for (auto J = 0U; J < Func->getNumParams(); ++J) {
+ const auto *Param = Func->getParamDecl(J);
+ const auto *ParamType =
+ Param->getType()->getAs<SubstTemplateTypeParmType>();
+ if (!ParamType ||
+ ParamType->getReplacedParameter()->getDecl() != TPDecl)
+ continue;
+ if (LHS.isUndef()) {
+ LHS = Call.getArgSVal(J);
+ } else {
+ verifyMatch(C, LHS, Call.getArgSVal(J));
+ }
}
}
}
@@ -347,7 +607,15 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
if (Func->isOverloadedOperator()) {
const auto Op = Func->getOverloadedOperator();
- if (isSimpleComparisonOperator(Op)) {
+ if (isAssignmentOperator(Op)) {
+ const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call);
+ if (Func->getParamDecl(0)->getType()->isRValueReferenceType()) {
+ handleAssign(C, InstCall->getCXXThisVal(), Call.getOriginExpr(),
+ Call.getArgSVal(0));
+ } else {
+ handleAssign(C, InstCall->getCXXThisVal());
+ }
+ } else if (isSimpleComparisonOperator(Op)) {
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
handleComparison(C, Call.getReturnValue(), InstCall->getCXXThisVal(),
Call.getArgSVal(0), Op);
@@ -387,6 +655,36 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
}
}
} else {
+ if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
+ if (isAssignCall(Func)) {
+ handleAssign(C, InstCall->getCXXThisVal());
+ } else if (isClearCall(Func)) {
+ handleClear(C, InstCall->getCXXThisVal());
+ } else if (isPushBackCall(Func) || isEmplaceBackCall(Func)) {
+ handlePushBack(C, InstCall->getCXXThisVal());
+ } else if (isPopBackCall(Func)) {
+ handlePopBack(C, InstCall->getCXXThisVal());
+ } else if (isPushFrontCall(Func) || isEmplaceFrontCall(Func)) {
+ handlePushFront(C, InstCall->getCXXThisVal());
+ } else if (isPopFrontCall(Func)) {
+ handlePopFront(C, InstCall->getCXXThisVal());
+ } else if (isInsertCall(Func) || isEmplaceCall(Func)) {
+ handleInsert(C, Call.getArgSVal(0));
+ } else if (isEraseCall(Func)) {
+ if (Call.getNumArgs() == 1) {
+ handleErase(C, Call.getArgSVal(0));
+ } else if (Call.getNumArgs() == 2) {
+ handleErase(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ } else if (isEraseAfterCall(Func)) {
+ if (Call.getNumArgs() == 1) {
+ handleEraseAfter(C, Call.getArgSVal(0));
+ } else if (Call.getNumArgs() == 2) {
+ handleEraseAfter(C, Call.getArgSVal(0), Call.getArgSVal(1));
+ }
+ }
+ }
+
const auto *OrigExpr = Call.getOriginExpr();
if (!OrigExpr)
return;
@@ -395,9 +693,6 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
return;
auto State = C.getState();
- // Already bound to container?
- if (getIteratorPosition(State, Call.getReturnValue()))
- return;
if (const auto *InstCall = dyn_cast<CXXInstanceCall>(&Call)) {
if (isBeginCall(Func)) {
@@ -412,6 +707,10 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
}
}
+ // Already bound to container?
+ if (getIteratorPosition(State, Call.getReturnValue()))
+ return;
+
// Copy-like and move constructors
if (isa<CXXConstructorCall>(&Call) && Call.getNumArgs() == 1) {
if (const auto *Pos = getIteratorPosition(State, Call.getArgSVal(0))) {
@@ -441,33 +740,19 @@ void IteratorChecker::checkPostCall(const CallEvent &Call,
}
}
-void IteratorChecker::checkPreStmt(const CXXOperatorCallExpr *COCE,
- CheckerContext &C) const {
- const auto *ThisExpr = COCE->getArg(0);
-
+void IteratorChecker::checkBind(SVal Loc, SVal Val, const Stmt *S,
+ CheckerContext &C) const {
auto State = C.getState();
- const auto *LCtx = C.getLocationContext();
-
- const auto CurrentThis = State->getSVal(ThisExpr, LCtx);
- if (const auto *Reg = CurrentThis.getAsRegion()) {
- if (!Reg->getAs<CXXTempObjectRegion>())
- return;
- const auto OldState = C.getPredecessor()->getFirstPred()->getState();
- const auto OldThis = OldState->getSVal(ThisExpr, LCtx);
- // FIXME: This solution is unreliable. It may happen that another checker
- // subscribes to the pre-statement check of `CXXOperatorCallExpr`
- // and adds a transition before us. The proper fix is to make the
- // CFG provide a `ConstructionContext` for the `CXXOperatorCallExpr`,
- // which would turn the corresponding `CFGStmt` element into a
- // `CFGCXXRecordTypedCall` element, which will allow `ExprEngine` to
- // foresee that the `begin()`/`end()` call constructs the object
- // directly in the temporary region that `CXXOperatorCallExpr` takes
- // as its implicit object argument.
- const auto *Pos = getIteratorPosition(OldState, OldThis);
- if (!Pos)
- return;
- State = setIteratorPosition(State, CurrentThis, *Pos);
+ const auto *Pos = getIteratorPosition(State, Val);
+ if (Pos) {
+ State = setIteratorPosition(State, Loc, *Pos);
C.addTransition(State);
+ } else {
+ const auto *OldPos = getIteratorPosition(State, Loc);
+ if (OldPos) {
+ State = removeIteratorPosition(State, Loc);
+ C.addTransition(State);
+ }
}
}
@@ -508,9 +793,13 @@ void IteratorChecker::checkLiveSymbols(ProgramStateRef State,
const auto CData = Cont.second;
if (CData.getBegin()) {
SR.markLive(CData.getBegin());
+      if (const auto *SIE = dyn_cast<SymIntExpr>(CData.getBegin()))
+ SR.markLive(SIE->getLHS());
}
if (CData.getEnd()) {
SR.markLive(CData.getEnd());
+      if (const auto *SIE = dyn_cast<SymIntExpr>(CData.getEnd()))
+ SR.markLive(SIE->getLHS());
}
}
}
@@ -523,7 +812,12 @@ void IteratorChecker::checkDeadSymbols(SymbolReaper &SR,
auto RegionMap = State->get<IteratorRegionMap>();
for (const auto Reg : RegionMap) {
if (!SR.isLiveRegion(Reg.first)) {
- State = State->remove<IteratorRegionMap>(Reg.first);
+ // The region behind the `LazyCompoundVal` is often cleaned up before
+ // the `LazyCompoundVal` itself. If there are iterator positions keyed
+      // by these regions, their cleanup must be deferred.
+ if (!isBoundThroughLazyCompoundVal(State->getEnvironment(), Reg.first)) {
+ State = State->remove<IteratorRegionMap>(Reg.first);
+ }
}
}
@@ -623,14 +917,24 @@ void IteratorChecker::verifyDereference(CheckerContext &C,
const SVal &Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
- if (Pos && isOutOfRange(State, *Pos)) {
- // If I do not put a tag here, some range tests will fail
- static CheckerProgramPointTag Tag("IteratorRangeChecker",
- "IteratorOutOfRange");
- auto *N = C.generateNonFatalErrorNode(State, &Tag);
+ if (Pos && isPastTheEnd(State, *Pos)) {
+ auto *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- reportOutOfRangeBug("Iterator accessed outside of its range.", Val, C, N);
+ reportOutOfRangeBug("Past-the-end iterator dereferenced.", Val, C, N);
+ return;
+ }
+}
+
+void IteratorChecker::verifyAccess(CheckerContext &C, const SVal &Val) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Val);
+ if (Pos && !Pos->isValid()) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N) {
+ return;
+ }
+ reportInvalidatedBug("Invalidated iterator accessed.", Val, C, N);
}
}
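+
+// A sketch of client code (hypothetical, not part of this file) showing what
+// the two checks above are meant to flag:
+//
+//   std::vector<int> V{1, 2, 3};
+//   auto I = V.cend();
+//   *I;                // warn: Past-the-end iterator dereferenced
+//
+//   auto J = V.begin();
+//   V.clear();
+//   ++J;               // warn: Invalidated iterator accessed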
@@ -643,14 +947,9 @@ void IteratorChecker::handleIncrement(CheckerContext &C, const SVal &RetVal,
if (Pos) {
auto &SymMgr = C.getSymbolManager();
auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto OldOffset = Pos->getOffset();
- auto NewOffset =
- SVB.evalBinOp(State, BO_Add,
- nonloc::SymbolVal(OldOffset),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
+ const auto NewPos =
+ advancePosition(C, OO_Plus, *Pos,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
State = setIteratorPosition(State, Iter, NewPos);
State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
C.addTransition(State);
@@ -666,14 +965,9 @@ void IteratorChecker::handleDecrement(CheckerContext &C, const SVal &RetVal,
if (Pos) {
auto &SymMgr = C.getSymbolManager();
auto &BVF = SymMgr.getBasicVals();
- auto &SVB = C.getSValBuilder();
- const auto OldOffset = Pos->getOffset();
- auto NewOffset =
- SVB.evalBinOp(State, BO_Sub,
- nonloc::SymbolVal(OldOffset),
- nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
+ const auto NewPos =
+ advancePosition(C, OO_Minus, *Pos,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
State = setIteratorPosition(State, Iter, NewPos);
State = setIteratorPosition(State, RetVal, Postfix ? *Pos : NewPos);
C.addTransition(State);
@@ -739,69 +1033,95 @@ void IteratorChecker::handleRandomIncrOrDecr(CheckerContext &C,
value = &val;
}
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- const auto OldOffset = Pos->getOffset();
- SymbolRef NewOffset;
- if (const auto intValue = value->getAs<nonloc::ConcreteInt>()) {
- // For concrete integers we can calculate the new position
- NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
- *intValue,
- SymMgr.getType(OldOffset)).getAsSymbol();
- } else {
- // For other symbols create a new symbol to keep expressions simple
- const auto &LCtx = C.getLocationContext();
- NewOffset = SymMgr.conjureSymbol(nullptr, LCtx, SymMgr.getType(OldOffset),
- C.blockCount());
- State = assumeNoOverflow(State, NewOffset, 4);
- }
- auto NewPos = Pos->setTo(NewOffset);
auto &TgtVal = (Op == OO_PlusEqual || Op == OO_MinusEqual) ? LHS : RetVal;
- State = setIteratorPosition(State, TgtVal, NewPos);
+ State =
+ setIteratorPosition(State, TgtVal, advancePosition(C, Op, *Pos, *value));
C.addTransition(State);
}
+void IteratorChecker::verifyIncrement(CheckerContext &C,
+ const SVal &Iter) const {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ verifyRandomIncrOrDecr(C, OO_Plus, Iter,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+}
+
+void IteratorChecker::verifyDecrement(CheckerContext &C,
+ const SVal &Iter) const {
+ auto &BVF = C.getSValBuilder().getBasicValueFactory();
+ verifyRandomIncrOrDecr(C, OO_Minus, Iter,
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))));
+}
+
void IteratorChecker::verifyRandomIncrOrDecr(CheckerContext &C,
OverloadedOperatorKind Op,
- const SVal &RetVal,
const SVal &LHS,
const SVal &RHS) const {
auto State = C.getState();
// If the iterator is initially inside its range, then the operation is valid
const auto *Pos = getIteratorPosition(State, LHS);
- if (!Pos || !isOutOfRange(State, *Pos))
+ if (!Pos)
return;
- auto value = RHS;
- if (auto loc = RHS.getAs<Loc>()) {
- value = State->getRawSVal(*loc);
+ auto Value = RHS;
+ if (auto ValAsLoc = RHS.getAs<Loc>()) {
+ Value = State->getRawSVal(*ValAsLoc);
}
- // Incremention or decremention by 0 is never bug
- if (isZero(State, value.castAs<NonLoc>()))
+ if (Value.isUnknown())
return;
- auto &SymMgr = C.getSymbolManager();
- auto &SVB = C.getSValBuilder();
- auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
- const auto OldOffset = Pos->getOffset();
- const auto intValue = value.getAs<nonloc::ConcreteInt>();
- if (!intValue)
+ // Incrementing or decrementing by 0 is never a bug.
+ if (isZero(State, Value.castAs<NonLoc>()))
return;
- auto NewOffset = SVB.evalBinOp(State, BinOp, nonloc::SymbolVal(OldOffset),
- *intValue,
- SymMgr.getType(OldOffset)).getAsSymbol();
- auto NewPos = Pos->setTo(NewOffset);
+ // The result may be the past-the-end iterator of the container, but any
+ // other out-of-range position is undefined behavior.
+ if (isAheadOfRange(State, advancePosition(C, Op, *Pos, Value))) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ reportOutOfRangeBug("Iterator decremented ahead of its valid range.", LHS,
+ C, N);
+ }
+ if (isBehindPastTheEnd(State, advancePosition(C, Op, *Pos, Value))) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N)
+ return;
+ reportOutOfRangeBug("Iterator incremented behind the past-the-end "
+ "iterator.", LHS, C, N);
+ }
+}
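+
+// A sketch of client code (hypothetical, not part of this file) for the two
+// out-of-range cases diagnosed above:
+//
+//   std::vector<int> V{1, 2, 3};
+//   auto I = V.begin();
+//   --I;               // warn: Iterator decremented ahead of its valid range
+//   auto J = V.end();
+//   J += 2;            // warn: Iterator incremented behind the past-the-end
+//                      //       iterator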
+
+void IteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
+ const MemRegion *Cont) const {
+ // Verify match between a container and the container of an iterator
+ Cont = Cont->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (Pos && Pos->getContainer() != Cont) {
+ auto *N = C.generateNonFatalErrorNode(State);
+ if (!N) {
+ return;
+ }
+ reportMismatchedBug("Container accessed using foreign iterator argument.",
+ Iter, Cont, C, N);
+ }
+}
- // If out of range, the only valid operation is to step into the range
- if (isOutOfRange(State, NewPos)) {
+void IteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const {
+ // Verify match between the containers of two iterators
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (Pos1 && Pos2 && Pos1->getContainer() != Pos2->getContainer()) {
auto *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- reportOutOfRangeBug("Iterator accessed past its end.", LHS, C, N);
+ reportMismatchedBug("Iterators of different containers used where the "
+ "same container is expected.", Iter1, Iter2, C, N);
}
}
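+
+// A sketch of client code (hypothetical, not part of this file) for the two
+// mismatch diagnostics above:
+//
+//   std::vector<int> V1{1, 2, 3}, V2{4, 5, 6};
+//   V1.erase(V2.cbegin());               // warn: Container accessed using
+//                                        //       foreign iterator argument
+//   bool B = V1.cbegin() == V2.cbegin(); // warn: Iterators of different
+//                                        //       containers used where the
+//                                        //       same container is expected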
@@ -811,9 +1131,7 @@ void IteratorChecker::handleBegin(CheckerContext &C, const Expr *CE,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// If the container already has a begin symbol then use it. Otherwise first
// create a new one.
@@ -837,9 +1155,7 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
if (!ContReg)
return;
- while (const auto *CBOR = ContReg->getAs<CXXBaseObjectRegion>()) {
- ContReg = CBOR->getSuperRegion();
- }
+ ContReg = ContReg->getMostDerivedObjectRegion();
// If the container already has an end symbol then use it. Otherwise first
// create a new one.
@@ -860,9 +1176,7 @@ void IteratorChecker::handleEnd(CheckerContext &C, const Expr *CE,
void IteratorChecker::assignToContainer(CheckerContext &C, const Expr *CE,
const SVal &RetVal,
const MemRegion *Cont) const {
- while (const auto *CBOR = Cont->getAs<CXXBaseObjectRegion>()) {
- Cont = CBOR->getSuperRegion();
- }
+ Cont = Cont->getMostDerivedObjectRegion();
auto State = C.getState();
auto &SymMgr = C.getSymbolManager();
@@ -874,6 +1188,399 @@ void IteratorChecker::assignToContainer(CheckerContext &C, const Expr *CE,
C.addTransition(State);
}
+void IteratorChecker::handleAssign(CheckerContext &C, const SVal &Cont,
+ const Expr *CE, const SVal &OldCont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // Assignment of a new value to a container always invalidates all its
+ // iterators
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ }
+
+ // In case of move, iterators of the old container (except the past-the-end
+ // iterators) remain valid but refer to the new container.
+ if (!OldCont.isUndef()) {
+ const auto *OldContReg = OldCont.getAsRegion();
+ if (OldContReg) {
+ OldContReg = OldContReg->getMostDerivedObjectRegion();
+ const auto OldCData = getContainerData(State, OldContReg);
+ if (OldCData) {
+ if (const auto OldEndSym = OldCData->getEnd()) {
+ // If we already assigned an "end" symbol to the old container, then
+ // first reassign to the new container all iterator positions that are
+ // not past the old container (i.e. not greater than or equal to its
+ // current "end" symbol).
+ State = reassignAllIteratorPositionsUnless(State, OldContReg, ContReg,
+ OldEndSym, BO_GE);
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+ // Then generate and assign a new "end" symbol for the new container.
+ auto NewEndSym =
+ SymMgr.conjureSymbol(CE, C.getLocationContext(),
+ C.getASTContext().LongTy, C.blockCount());
+ State = assumeNoOverflow(State, NewEndSym, 4);
+ if (CData) {
+ State = setContainerData(State, ContReg, CData->newEnd(NewEndSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromEnd(NewEndSym));
+ }
+ // Finally, replace the old "end" symbol in the already reassigned
+ // iterator positions with the new "end" symbol.
+ State = rebaseSymbolInIteratorPositionsIf(
+ State, SVB, OldEndSym, NewEndSym, OldEndSym, BO_LT);
+ } else {
+ // There was no "end" symbol assigned yet to the old container,
+ // so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ if (const auto OldBeginSym = OldCData->getBegin()) {
+ // If we already assigned a "begin" symbol to the old container, then
+ // assign it to the new container and remove it from the old one.
+ if (CData) {
+ State =
+ setContainerData(State, ContReg, CData->newBegin(OldBeginSym));
+ } else {
+ State = setContainerData(State, ContReg,
+ ContainerData::fromBegin(OldBeginSym));
+ }
+ State =
+ setContainerData(State, OldContReg, OldCData->newEnd(nullptr));
+ }
+ } else {
+ // There was neither "begin" nor "end" symbol assigned yet to the old
+ // container, so reassign all iterator positions to the new container.
+ State = reassignAllIteratorPositions(State, OldContReg, ContReg);
+ }
+ }
+ }
+ C.addTransition(State);
+}
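+
+// A sketch of client code (hypothetical, not part of this file) for the
+// move-assignment modeling above: non-past-the-end positions of the old
+// container are reassigned to the new one.
+//
+//   std::list<int> L1{1, 2, 3}, L2;
+//   auto I = L1.begin();
+//   L2 = std::move(L1);
+//   *I;                // OK in this model: `I` now refers into `L2`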
+
+void IteratorChecker::handleClear(CheckerContext &C, const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // The clear() operation invalidates all the iterators, except the
+ // past-the-end iterators of list-like containers.
+ auto State = C.getState();
+ if (!hasSubscriptOperator(State, ContReg) ||
+ !backModifiable(State, ContReg)) {
+ const auto CData = getContainerData(State, ContReg);
+ if (CData) {
+ if (const auto EndSym = CData->getEnd()) {
+ State =
+ invalidateAllIteratorPositionsExcept(State, ContReg, EndSym, BO_GE);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+}
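+
+// A sketch of client code (hypothetical, not part of this file) for the
+// clear() handling above:
+//
+//   std::list<int> L{1, 2, 3};
+//   auto LE = L.cend();
+//   auto LI = L.cbegin();
+//   L.clear();         // `LI` is invalidated; `LE` (past-the-end) stays valid
+//
+//   std::vector<int> V{1, 2, 3};
+//   auto VE = V.cend();
+//   V.clear();         // every tracked position of `V`, including `VE`, is
+//                      // invalidated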
+
+void IteratorChecker::handlePushBack(CheckerContext &C,
+ const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg) && frontModifiable(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ return;
+ }
+
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ // For vector-like containers invalidate the past-the-end iterator positions
+ if (const auto EndSym = CData->getEnd()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newEndSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ }
+ C.addTransition(State);
+}
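+
+// A sketch of client code (hypothetical, not part of this file) for the
+// push_back() handling above:
+//
+//   std::vector<int> V{1};
+//   auto VE = V.cend();
+//   V.push_back(2);    // past-the-end positions such as `VE` are invalidated
+//
+//   std::list<int> L{1};
+//   auto LI = L.cbegin();
+//   L.push_back(2);    // `LI` stays valid; only the end symbol is advanced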
+
+void IteratorChecker::handlePopBack(CheckerContext &C, const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto EndSym = CData->getEnd()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto BackSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(EndSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(EndSym)).getAsSymbol();
+ // For vector-like and deque-like containers invalidate the last and the
+ // past-the-end iterator positions. For list-like containers only
+ // invalidate the last position.
+ if (hasSubscriptOperator(State, ContReg) &&
+ backModifiable(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BackSym, BO_GE);
+ State = setContainerData(State, ContReg, CData->newEnd(nullptr));
+ } else {
+ State = invalidateIteratorPositions(State, BackSym, BO_EQ);
+ }
+ auto newEndSym = BackSym;
+ State = setContainerData(State, ContReg, CData->newEnd(newEndSym));
+ C.addTransition(State);
+ }
+}
+
+void IteratorChecker::handlePushFront(CheckerContext &C,
+ const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ // For deque-like containers invalidate all iterator positions
+ auto State = C.getState();
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateAllIteratorPositions(State, ContReg);
+ C.addTransition(State);
+ } else {
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ if (const auto BeginSym = CData->getBegin()) {
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Sub,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State);
+ }
+ }
+}
+
+void IteratorChecker::handlePopFront(CheckerContext &C,
+ const SVal &Cont) const {
+ const auto *ContReg = Cont.getAsRegion();
+ if (!ContReg)
+ return;
+
+ ContReg = ContReg->getMostDerivedObjectRegion();
+
+ auto State = C.getState();
+ const auto CData = getContainerData(State, ContReg);
+ if (!CData)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // list-like containers only invalidate the first position.
+ if (const auto BeginSym = CData->getBegin()) {
+ if (hasSubscriptOperator(State, ContReg)) {
+ State = invalidateIteratorPositions(State, BeginSym, BO_LE);
+ } else {
+ State = invalidateIteratorPositions(State, BeginSym, BO_EQ);
+ }
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto newBeginSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(BeginSym),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(BeginSym)).getAsSymbol();
+ State = setContainerData(State, ContReg, CData->newBegin(newBeginSym));
+ C.addTransition(State);
+ }
+}
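+
+// A sketch of client code (hypothetical, not part of this file) for the
+// pop_back()/pop_front() handling above (list-like case):
+//
+//   std::list<int> L{1, 2, 3};
+//   auto B = L.begin();
+//   auto E = std::prev(L.end());
+//   L.pop_front();     // only `B` is invalidated
+//   L.pop_back();      // only `E` (the last position) is invalidated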
+
+void IteratorChecker::handleInsert(CheckerContext &C, const SVal &Iter) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions after the insertion.
+ const auto *Cont = Pos->getContainer();
+ if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
+ if (frontModifiable(State, Cont)) {
+ State = invalidateAllIteratorPositions(State, Cont);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, Cont)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, Cont, CData->newEnd(nullptr));
+ }
+ }
+ C.addTransition(State);
+ }
+}
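+
+// A sketch of client code (hypothetical, not part of this file) for the
+// insert()/emplace() handling above (vector-like case):
+//
+//   std::vector<int> V{1, 3};
+//   auto I = V.begin();
+//   auto J = I + 1;
+//   V.insert(J, 2);    // `J` and all later positions (including the
+//                      // past-the-end one) are invalidated; `I` is kept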
+
+void IteratorChecker::handleErase(CheckerContext &C, const SVal &Iter) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions at and after the
+ // deletion. For list-like containers only invalidate the deleted position.
+ const auto *Cont = Pos->getContainer();
+ if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
+ if (frontModifiable(State, Cont)) {
+ State = invalidateAllIteratorPositions(State, Cont);
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, Cont)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, Cont, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos->getOffset(), BO_EQ);
+ }
+ C.addTransition(State);
+}
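+
+// A sketch of client code (hypothetical, not part of this file) for the
+// single-iterator erase() handling above:
+//
+//   std::vector<int> V{1, 2, 3};
+//   auto I = V.begin(), J = I + 1;
+//   V.erase(I);        // both `I` and `J` are invalidated (at/after deletion)
+//
+//   std::list<int> L{1, 2, 3};
+//   auto K = L.begin();
+//   L.erase(K);        // only `K` is invalidated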
+
+void IteratorChecker::handleErase(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const {
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // For deque-like containers invalidate all iterator positions. For
+ // vector-like containers invalidate iterator positions at and after the
+ // deletion range. For list-like containers only invalidate the deleted
+ // position range [first..last).
+ const auto *Cont = Pos1->getContainer();
+ if (hasSubscriptOperator(State, Cont) && backModifiable(State, Cont)) {
+ if (frontModifiable(State, Cont)) {
+ State = invalidateAllIteratorPositions(State, Cont);
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE);
+ }
+ if (const auto *CData = getContainerData(State, Cont)) {
+ if (const auto EndSym = CData->getEnd()) {
+ State = invalidateIteratorPositions(State, EndSym, BO_GE);
+ State = setContainerData(State, Cont, CData->newEnd(nullptr));
+ }
+ }
+ } else {
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GE,
+ Pos2->getOffset(), BO_LT);
+ }
+ C.addTransition(State);
+}
+
+void IteratorChecker::handleEraseAfter(CheckerContext &C,
+ const SVal &Iter) const {
+ auto State = C.getState();
+ const auto *Pos = getIteratorPosition(State, Iter);
+ if (!Pos)
+ return;
+
+ // Invalidate the deleted iterator position, which is the position of the
+ // parameter plus one.
+ auto &SymMgr = C.getSymbolManager();
+ auto &BVF = SymMgr.getBasicVals();
+ auto &SVB = C.getSValBuilder();
+ const auto NextSym =
+ SVB.evalBinOp(State, BO_Add,
+ nonloc::SymbolVal(Pos->getOffset()),
+ nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(1))),
+ SymMgr.getType(Pos->getOffset())).getAsSymbol();
+ State = invalidateIteratorPositions(State, NextSym, BO_EQ);
+ C.addTransition(State);
+}
+
+void IteratorChecker::handleEraseAfter(CheckerContext &C, const SVal &Iter1,
+ const SVal &Iter2) const {
+ auto State = C.getState();
+ const auto *Pos1 = getIteratorPosition(State, Iter1);
+ const auto *Pos2 = getIteratorPosition(State, Iter2);
+ if (!Pos1 || !Pos2)
+ return;
+
+ // Invalidate the deleted iterator position range (first..last)
+ State = invalidateIteratorPositions(State, Pos1->getOffset(), BO_GT,
+ Pos2->getOffset(), BO_LT);
+ C.addTransition(State);
+}
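+
+// A sketch of client code (hypothetical, not part of this file) for the
+// erase_after() handling above:
+//
+//   std::forward_list<int> FL{1, 2, 3};
+//   auto I = FL.begin();
+//   auto J = std::next(I);
+//   FL.erase_after(I); // `J` (the position right after `I`) is invalidated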
+
+IteratorPosition IteratorChecker::advancePosition(CheckerContext &C,
+ OverloadedOperatorKind Op,
+ const IteratorPosition &Pos,
+ const SVal &Distance) const {
+ auto State = C.getState();
+ auto &SymMgr = C.getSymbolManager();
+ auto &SVB = C.getSValBuilder();
+
+ assert((Op == OO_Plus || Op == OO_PlusEqual ||
+ Op == OO_Minus || Op == OO_MinusEqual) &&
+ "Advance operator must be one of +, -, += and -=.");
+ auto BinOp = (Op == OO_Plus || Op == OO_PlusEqual) ? BO_Add : BO_Sub;
+ if (const auto IntDist = Distance.getAs<nonloc::ConcreteInt>()) {
+ // For concrete integers we can calculate the new position
+ return Pos.setTo(SVB.evalBinOp(State, BinOp,
+ nonloc::SymbolVal(Pos.getOffset()), *IntDist,
+ SymMgr.getType(Pos.getOffset()))
+ .getAsSymbol());
+ } else {
+ // For other symbols create a new symbol to keep expressions simple
+ const auto &LCtx = C.getLocationContext();
+ const auto NewPosSym = SymMgr.conjureSymbol(nullptr, LCtx,
+ SymMgr.getType(Pos.getOffset()),
+ C.blockCount());
+ State = assumeNoOverflow(State, NewPosSym, 4);
+ return Pos.setTo(NewPosSym);
+ }
+}
+
void IteratorChecker::reportOutOfRangeBug(const StringRef &Message,
const SVal &Val, CheckerContext &C,
ExplodedNode *ErrNode) const {
@@ -882,14 +1589,47 @@ void IteratorChecker::reportOutOfRangeBug(const StringRef &Message,
C.emitReport(std::move(R));
}
+void IteratorChecker::reportMismatchedBug(const StringRef &Message,
+ const SVal &Val1, const SVal &Val2,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
+ auto R = llvm::make_unique<BugReport>(*MismatchedBugType, Message, ErrNode);
+ R->markInteresting(Val1);
+ R->markInteresting(Val2);
+ C.emitReport(std::move(R));
+}
+
+void IteratorChecker::reportMismatchedBug(const StringRef &Message,
+ const SVal &Val, const MemRegion *Reg,
+ CheckerContext &C,
+ ExplodedNode *ErrNode) const {
+ auto R = llvm::make_unique<BugReport>(*MismatchedBugType, Message, ErrNode);
+ R->markInteresting(Val);
+ R->markInteresting(Reg);
+ C.emitReport(std::move(R));
+}
+
+void IteratorChecker::reportInvalidatedBug(const StringRef &Message,
+ const SVal &Val, CheckerContext &C,
+ ExplodedNode *ErrNode) const {
+ auto R = llvm::make_unique<BugReport>(*InvalidatedBugType, Message, ErrNode);
+ R->markInteresting(Val);
+ C.emitReport(std::move(R));
+}
+
namespace {
bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
-bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
+bool isGreater(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
+bool isEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
BinaryOperator::Opcode Opc);
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
BinaryOperator::Opcode Opc);
+const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
+ const MemRegion *Reg);
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB, SymbolRef Expr,
+ SymbolRef OldSym, SymbolRef NewSym);
bool isIteratorType(const QualType &Type) {
if (Type->isPointerType())
@@ -943,6 +1683,11 @@ bool isIterator(const CXXRecordDecl *CRD) {
HasPostIncrOp && HasDerefOp;
}
+bool isComparisonOperator(OverloadedOperatorKind OK) {
+ return OK == OO_EqualEqual || OK == OO_ExclaimEqual || OK == OO_Less ||
+ OK == OO_LessEqual || OK == OO_Greater || OK == OO_GreaterEqual;
+}
+
bool isBeginCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
@@ -957,10 +1702,139 @@ bool isEndCall(const FunctionDecl *Func) {
return IdInfo->getName().endswith_lower("end");
}
+bool isAssignCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() > 2)
+ return false;
+ return IdInfo->getName() == "assign";
+}
+
+bool isClearCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() > 0)
+ return false;
+ return IdInfo->getName() == "clear";
+}
+
+bool isPushBackCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() != 1)
+ return false;
+ return IdInfo->getName() == "push_back";
+}
+
+bool isEmplaceBackCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 1)
+ return false;
+ return IdInfo->getName() == "emplace_back";
+}
+
+bool isPopBackCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() > 0)
+ return false;
+ return IdInfo->getName() == "pop_back";
+}
+
+bool isPushFrontCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() != 1)
+ return false;
+ return IdInfo->getName() == "push_front";
+}
+
+bool isEmplaceFrontCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 1)
+ return false;
+ return IdInfo->getName() == "emplace_front";
+}
+
+bool isPopFrontCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() > 0)
+ return false;
+ return IdInfo->getName() == "pop_front";
+}
+
+bool isInsertCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 2 || Func->getNumParams() > 3)
+ return false;
+ if (!isIteratorType(Func->getParamDecl(0)->getType()))
+ return false;
+ return IdInfo->getName() == "insert";
+}
+
+bool isEmplaceCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 2)
+ return false;
+ if (!isIteratorType(Func->getParamDecl(0)->getType()))
+ return false;
+ return IdInfo->getName() == "emplace";
+}
+
+bool isEraseCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 1 || Func->getNumParams() > 2)
+ return false;
+ if (!isIteratorType(Func->getParamDecl(0)->getType()))
+ return false;
+ if (Func->getNumParams() == 2 &&
+ !isIteratorType(Func->getParamDecl(1)->getType()))
+ return false;
+ return IdInfo->getName() == "erase";
+}
+
+bool isEraseAfterCall(const FunctionDecl *Func) {
+ const auto *IdInfo = Func->getIdentifier();
+ if (!IdInfo)
+ return false;
+ if (Func->getNumParams() < 1 || Func->getNumParams() > 2)
+ return false;
+ if (!isIteratorType(Func->getParamDecl(0)->getType()))
+ return false;
+ if (Func->getNumParams() == 2 &&
+ !isIteratorType(Func->getParamDecl(1)->getType()))
+ return false;
+ return IdInfo->getName() == "erase_after";
+}
+
+bool isAssignmentOperator(OverloadedOperatorKind OK) { return OK == OO_Equal; }
+
bool isSimpleComparisonOperator(OverloadedOperatorKind OK) {
return OK == OO_EqualEqual || OK == OO_ExclaimEqual;
}
+bool isAccessOperator(OverloadedOperatorKind OK) {
+ return isDereferenceOperator(OK) || isIncrementOperator(OK) ||
+ isDecrementOperator(OK) || isRandomIncrOrDecrOperator(OK);
+}
+
bool isDereferenceOperator(OverloadedOperatorKind OK) {
return OK == OO_Star || OK == OO_Arrow || OK == OO_ArrowStar ||
OK == OO_Subscript;
@@ -996,6 +1870,66 @@ BinaryOperator::Opcode getOpcode(const SymExpr *SE) {
return BO_Comma; // Extremal value, neither EQ nor NE
}
+bool hasSubscriptOperator(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->isOverloadedOperator())
+ continue;
+ const auto OPK = Method->getOverloadedOperator();
+ if (OPK == OO_Subscript) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool frontModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_front" || Method->getName() == "pop_front") {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool backModifiable(ProgramStateRef State, const MemRegion *Reg) {
+ const auto *CRD = getCXXRecordDecl(State, Reg);
+ if (!CRD)
+ return false;
+
+ for (const auto *Method : CRD->methods()) {
+ if (!Method->getDeclName().isIdentifier())
+ continue;
+ if (Method->getName() == "push_back" || Method->getName() == "pop_back") {
+ return true;
+ }
+ }
+ return false;
+}
+
+const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
+ const MemRegion *Reg) {
+ auto TI = getDynamicTypeInfo(State, Reg);
+ if (!TI.isValid())
+ return nullptr;
+
+ auto Type = TI.getType();
+ if (const auto *RefT = Type->getAs<ReferenceType>()) {
+ Type = RefT->getPointeeType();
+ }
+
+ return Type->getUnqualifiedDesugaredType()->getAsCXXRecordDecl();
+}
+
const RegionOrSymbol getRegionOrSymbol(const SVal &Val) {
if (const auto Reg = Val.getAsRegion()) {
return Reg;
@@ -1097,7 +2031,8 @@ ProgramStateRef setContainerData(ProgramStateRef State, const MemRegion *Cont,
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const SVal &Val) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->get<IteratorRegionMap>(Reg);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->get<IteratorSymbolMap>(Sym);
@@ -1110,7 +2045,8 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
const IteratorPosition *getIteratorPosition(ProgramStateRef State,
RegionOrSymbol RegOrSym) {
if (RegOrSym.is<const MemRegion *>()) {
- return State->get<IteratorRegionMap>(RegOrSym.get<const MemRegion *>());
+ auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
+ return State->get<IteratorRegionMap>(Reg);
} else if (RegOrSym.is<SymbolRef>()) {
return State->get<IteratorSymbolMap>(RegOrSym.get<SymbolRef>());
}
@@ -1119,7 +2055,8 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
const IteratorPosition &Pos) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->set<IteratorRegionMap>(Reg, Pos);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->set<IteratorSymbolMap>(Sym, Pos);
@@ -1133,8 +2070,8 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State,
RegionOrSymbol RegOrSym,
const IteratorPosition &Pos) {
if (RegOrSym.is<const MemRegion *>()) {
- return State->set<IteratorRegionMap>(RegOrSym.get<const MemRegion *>(),
- Pos);
+ auto Reg = RegOrSym.get<const MemRegion *>()->getMostDerivedObjectRegion();
+ return State->set<IteratorRegionMap>(Reg, Pos);
} else if (RegOrSym.is<SymbolRef>()) {
return State->set<IteratorSymbolMap>(RegOrSym.get<SymbolRef>(), Pos);
}
@@ -1142,7 +2079,8 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State,
}
ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
- if (const auto Reg = Val.getAsRegion()) {
+ if (auto Reg = Val.getAsRegion()) {
+ Reg = Reg->getMostDerivedObjectRegion();
return State->remove<IteratorRegionMap>(Reg);
} else if (const auto Sym = Val.getAsSymbol()) {
return State->remove<IteratorSymbolMap>(Sym);
@@ -1211,6 +2149,164 @@ bool hasLiveIterators(ProgramStateRef State, const MemRegion *Cont) {
return false;
}
+bool isBoundThroughLazyCompoundVal(const Environment &Env,
+ const MemRegion *Reg) {
+ for (const auto Binding : Env) {
+ if (const auto LCVal = Binding.second.getAs<nonloc::LazyCompoundVal>()) {
+ if (LCVal->getRegion() == Reg)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template <typename Condition, typename Process>
+ProgramStateRef processIteratorPositions(ProgramStateRef State, Condition Cond,
+ Process Proc) {
+ auto &RegionMapFactory = State->get_context<IteratorRegionMap>();
+ auto RegionMap = State->get<IteratorRegionMap>();
+ bool Changed = false;
+ for (const auto Reg : RegionMap) {
+ if (Cond(Reg.second)) {
+ RegionMap = RegionMapFactory.add(RegionMap, Reg.first, Proc(Reg.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorRegionMap>(RegionMap);
+
+ auto &SymbolMapFactory = State->get_context<IteratorSymbolMap>();
+ auto SymbolMap = State->get<IteratorSymbolMap>();
+ Changed = false;
+ for (const auto Sym : SymbolMap) {
+ if (Cond(Sym.second)) {
+ SymbolMap = SymbolMapFactory.add(SymbolMap, Sym.first, Proc(Sym.second));
+ Changed = true;
+ }
+ }
+
+ if (Changed)
+ State = State->set<IteratorSymbolMap>(SymbolMap);
+
+ return State;
+}
+
+ProgramStateRef invalidateAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchCont, Invalidate);
+}
+
+ProgramStateRef
+invalidateAllIteratorPositionsExcept(ProgramStateRef State,
+ const MemRegion *Cont, SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, MatchContAndCompare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef invalidateIteratorPositions(ProgramStateRef State,
+ SymbolRef Offset1,
+ BinaryOperator::Opcode Opc1,
+ SymbolRef Offset2,
+ BinaryOperator::Opcode Opc2) {
+ auto Compare = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), Offset1, Opc1) &&
+ compare(State, Pos.getOffset(), Offset2, Opc2);
+ };
+ auto Invalidate = [&](const IteratorPosition &Pos) {
+ return Pos.invalidate();
+ };
+ return processIteratorPositions(State, Compare, Invalidate);
+}
+
+ProgramStateRef reassignAllIteratorPositions(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont) {
+ auto MatchCont = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont;
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchCont, ReAssign);
+}
+
+ProgramStateRef reassignAllIteratorPositionsUnless(ProgramStateRef State,
+ const MemRegion *Cont,
+ const MemRegion *NewCont,
+ SymbolRef Offset,
+ BinaryOperator::Opcode Opc) {
+ auto MatchContAndCompare = [&](const IteratorPosition &Pos) {
+ return Pos.getContainer() == Cont &&
+ !compare(State, Pos.getOffset(), Offset, Opc);
+ };
+ auto ReAssign = [&](const IteratorPosition &Pos) {
+ return Pos.reAssign(NewCont);
+ };
+ return processIteratorPositions(State, MatchContAndCompare, ReAssign);
+}
+
+// Rebases the symbolic expression `OldSym + Int` to `NewSym + Int`,
+// `OldSym - Int` to `NewSym - Int` and `OldSym` to `NewSym` in every iterator
+// position offset for which the comparison with `CondSym` (using `Opc`) holds.
+ProgramStateRef rebaseSymbolInIteratorPositionsIf(
+ ProgramStateRef State, SValBuilder &SVB, SymbolRef OldSym,
+ SymbolRef NewSym, SymbolRef CondSym, BinaryOperator::Opcode Opc) {
+ auto LessThanEnd = [&](const IteratorPosition &Pos) {
+ return compare(State, Pos.getOffset(), CondSym, Opc);
+ };
+ auto RebaseSymbol = [&](const IteratorPosition &Pos) {
+ return Pos.setTo(rebaseSymbol(State, SVB, Pos.getOffset(), OldSym,
+ NewSym));
+ };
+ return processIteratorPositions(State, LessThanEnd, RebaseSymbol);
+}
+
+// Rebases the symbolic expression `OldExpr + Int` to `NewSym + Int`,
+// `OldExpr - Int` to `NewSym - Int` and `OldExpr` to `NewSym` in the
+// expression `OrigExpr`. For example, if `OrigExpr` is `OldExpr + 2`, the
+// result is `NewSym + 2`.
+SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
+ SymbolRef OrigExpr, SymbolRef OldExpr,
+ SymbolRef NewSym) {
+ auto &SymMgr = SVB.getSymbolManager();
+ auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
+ nonloc::SymbolVal(OldExpr),
+ SymMgr.getType(OrigExpr));
+
+ const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
+ if (!DiffInt)
+ return OrigExpr;
+
+ return SVB.evalBinOpNN(State, BO_Add, *DiffInt, nonloc::SymbolVal(NewSym),
+ SymMgr.getType(OrigExpr)).getAsSymbol();
+}
+
bool isZero(ProgramStateRef State, const NonLoc &Val) {
auto &BVF = State->getBasicVals();
return compare(State, Val,
@@ -1218,14 +2314,27 @@ bool isZero(ProgramStateRef State, const NonLoc &Val) {
BO_EQ);
}
-bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
+bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos) {
const auto *Cont = Pos.getContainer();
const auto *CData = getContainerData(State, Cont);
if (!CData)
return false;
- // Out of range means less than the begin symbol or greater or equal to the
- // end symbol.
+ const auto End = CData->getEnd();
+ if (End) {
+ if (isEqual(State, Pos.getOffset(), End)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
+ const auto *Cont = Pos.getContainer();
+ const auto *CData = getContainerData(State, Cont);
+ if (!CData)
+ return false;
const auto Beg = CData->getBegin();
if (Beg) {
@@ -1234,9 +2343,18 @@ bool isOutOfRange(ProgramStateRef State, const IteratorPosition &Pos) {
}
}
+ return false;
+}
+
+bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos) {
+ const auto *Cont = Pos.getContainer();
+ const auto *CData = getContainerData(State, Cont);
+ if (!CData)
+ return false;
+
const auto End = CData->getEnd();
if (End) {
- if (isGreaterOrEqual(State, Pos.getOffset(), End)) {
+ if (isGreater(State, Pos.getOffset(), End)) {
return true;
}
}
@@ -1248,8 +2366,12 @@ bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
return compare(State, Sym1, Sym2, BO_LT);
}
-bool isGreaterOrEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
- return compare(State, Sym1, Sym2, BO_GE);
+bool isGreater(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
+ return compare(State, Sym1, Sym2, BO_GT);
+}
+
+bool isEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2) {
+ return compare(State, Sym1, Sym2, BO_EQ);
}
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
@@ -1257,6 +2379,7 @@ bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
return compare(State, nonloc::SymbolVal(Sym1), nonloc::SymbolVal(Sym2), Opc);
}
+
bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
BinaryOperator::Opcode Opc) {
auto &SVB = State->getStateManager().getSValBuilder();
@@ -1281,4 +2404,5 @@ bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
}
REGISTER_CHECKER(IteratorRangeChecker)
-
+REGISTER_CHECKER(MismatchedIteratorChecker)
+REGISTER_CHECKER(InvalidatedIteratorChecker)
diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 2fb627184eb9..aade62fd7491 100644
--- a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -28,7 +28,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
diff --git a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index db4fbca36deb..df238f2b2e45 100644
--- a/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
@@ -32,8 +32,7 @@ static bool IsLLVMStringRef(QualType T) {
if (!RT)
return false;
- return StringRef(QualType(RT, 0).getAsString()) ==
- "class StringRef";
+ return StringRef(QualType(RT, 0).getAsString()) == "class StringRef";
}
/// Check whether the declaration is semantically inside the top-level
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 849b1193c042..eda39efeca17 100644
--- a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -125,7 +125,6 @@ public:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -1003,7 +1002,6 @@ void NonLocalizedStringChecker::checkPostStmt(const ObjCStringLiteral *SL,
std::shared_ptr<PathDiagnosticPiece>
NonLocalizedStringBRVisitor::VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC, BugReport &BR) {
if (Satisfied)
return nullptr;
@@ -1400,7 +1398,8 @@ void ento::registerNonLocalizedStringChecker(CheckerManager &mgr) {
NonLocalizedStringChecker *checker =
mgr.registerChecker<NonLocalizedStringChecker>();
checker->IsAggressive =
- mgr.getAnalyzerOptions().getBooleanOption("AggressiveReport", false);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption("AggressiveReport",
+ false, checker);
}
void ento::registerEmptyLocalizationContextChecker(CheckerManager &mgr) {
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
index e9ec7a0c4365..fb9bccebe465 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
@@ -87,7 +87,6 @@ void MPIBugReporter::reportUnmatchedWait(
std::shared_ptr<PathDiagnosticPiece>
MPIBugReporter::RequestNodeVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
@@ -96,13 +95,13 @@ MPIBugReporter::RequestNodeVisitor::VisitNode(const ExplodedNode *N,
const Request *const Req = N->getState()->get<RequestMap>(RequestRegion);
const Request *const PrevReq =
- PrevN->getState()->get<RequestMap>(RequestRegion);
+ N->getFirstPred()->getState()->get<RequestMap>(RequestRegion);
// Check if request was previously unused or in a different state.
if ((Req && !PrevReq) || (Req->CurrentState != PrevReq->CurrentState)) {
IsNodeFound = true;
- ProgramPoint P = PrevN->getLocation();
+ ProgramPoint P = N->getFirstPred()->getLocation();
PathDiagnosticLocation L =
PathDiagnosticLocation::create(P, BRC.getSourceManager());
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
index 40eb0631d7c5..32fcb07e3371 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -91,7 +91,6 @@ private:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 696cf39473d5..28c6898f7947 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -16,7 +16,7 @@
//===----------------------------------------------------------------------===//
#include "MPIChecker.h"
-#include "../ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
namespace clang {
namespace ento {
@@ -100,9 +100,6 @@ void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent,
void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
CheckerContext &Ctx) const {
- if (!SymReaper.hasDeadSymbols())
- return;
-
ProgramStateRef State = Ctx.getState();
const auto &Requests = State->get<RequestMap>();
if (Requests.isEmpty())
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index b8ef6701c0df..06e27fc5718d 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -12,10 +12,11 @@
// to be freed using a call to SecKeychainItemFreeContent.
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -29,6 +30,7 @@ namespace {
class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::DeadSymbols,
+ check::PointerEscape,
eval::Assume> {
mutable std::unique_ptr<BugType> BT;
@@ -58,6 +60,10 @@ public:
void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ ProgramStateRef checkPointerEscape(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call,
+ PointerEscapeKind Kind) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void printState(raw_ostream &Out, ProgramStateRef State,
@@ -135,7 +141,6 @@ private:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
};
@@ -571,14 +576,52 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
C.addTransition(State, N);
}
+ProgramStateRef MacOSKeychainAPIChecker::checkPointerEscape(
+ ProgramStateRef State, const InvalidatedSymbols &Escaped,
+ const CallEvent *Call, PointerEscapeKind Kind) const {
+ // FIXME: This branch doesn't make any sense at all, but it is an overfitted
+ // replacement for a previous overfitted code that was making even less sense.
+ if (!Call || Call->getDecl())
+ return State;
+
+ for (auto I : State->get<AllocatedData>()) {
+ SymbolRef Sym = I.first;
+ if (Escaped.count(Sym))
+ State = State->remove<AllocatedData>(Sym);
+
+ // This checker is special. Most checkers in fact only track symbols of
+ // SymbolConjured type, e.g. symbols returned from functions such as
+ // malloc(). This checker tracks symbols returned as out-parameters.
+ //
+ // When a function is evaluated conservatively, the out-parameter's pointee
+ // base region gets invalidated with a SymbolConjured. If the base region is
+ // larger than the region we're interested in, the value we're interested in
+ // would be SymbolDerived based on that SymbolConjured. However, such
+ // SymbolDerived will never be listed in the Escaped set when the base
+ // region is invalidated because ExprEngine doesn't know which symbols
+ // were derived from a given symbol, while there can be infinitely many
+ // valid symbols derived from any given symbol.
+ //
+ // Hence the extra boilerplate: remove the derived symbol when its parent
+ // symbol escapes.
+ //
+ if (const auto *SD = dyn_cast<SymbolDerived>(Sym)) {
+ SymbolRef ParentSym = SD->getParentSymbol();
+ if (Escaped.count(ParentSym))
+ State = State->remove<AllocatedData>(Sym);
+ }
+ }
+ return State;
+}
+
std::shared_ptr<PathDiagnosticPiece>
MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
- const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
- BugReport &BR) {
+ const ExplodedNode *N, BugReporterContext &BRC, BugReport &BR) {
const AllocationState *AS = N->getState()->get<AllocatedData>(Sym);
if (!AS)
return nullptr;
- const AllocationState *ASPrev = PrevN->getState()->get<AllocatedData>(Sym);
+ const AllocationState *ASPrev =
+ N->getFirstPred()->getState()->get<AllocatedData>(Sym);
if (ASPrev)
return nullptr;
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 437378e53daa..f5976d7da4c1 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -83,7 +83,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
// that dispatch_once is a macro that wraps a call to _dispatch_once.
// _dispatch_once is then a function which then calls the real dispatch_once.
// Users do not care; they just want the warning at the top-level call.
- if (CE->getLocStart().isMacroID()) {
+ if (CE->getBeginLoc().isMacroID()) {
StringRef TrimmedFName = FName.ltrim('_');
if (TrimmedFName != FName)
FName = TrimmedFName;
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index ebaf79a780c0..ae1b1fc837be 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
@@ -161,6 +161,7 @@ class MallocChecker : public Checker<check::DeadSymbols,
check::PointerEscape,
check::ConstPointerEscape,
check::PreStmt<ReturnStmt>,
+ check::EndFunction,
check::PreCall,
check::PostStmt<CallExpr>,
check::PostStmt<CXXNewExpr>,
@@ -193,6 +194,7 @@ public:
CK_NewDeleteChecker,
CK_NewDeleteLeaksChecker,
CK_MismatchedDeallocatorChecker,
+ CK_InnerPointerChecker,
CK_NumCheckKinds
};
@@ -217,6 +219,7 @@ public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *S, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void checkLocation(SVal l, bool isLoad, const Stmt *S,
@@ -353,7 +356,7 @@ private:
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State);
- ///Check if the memory associated with this symbol was released.
+ /// Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
@@ -377,13 +380,16 @@ private:
ProgramStateRef State,
SymbolRef &EscapingSymbol) const;
- // Implementation of the checkPointerEscape callabcks.
+ // Implementation of the checkPointerEscape callbacks.
ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind,
bool(*CheckRefState)(const RefState*)) const;
+ // Implementation of the checkPreStmt and checkEndFunction callbacks.
+ void checkEscapeOnReturn(const ReturnStmt *S, CheckerContext &C) const;
+
///@{
/// Tells if a given family/call/symbol is tracked by the current checker.
/// Sets CheckKind to the kind of the checker responsible for this
@@ -511,7 +517,6 @@ private:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -707,10 +712,8 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
return false;
}
-// Tells if the callee is one of the following:
-// 1) A global non-placement new/delete operator function.
-// 2) A global placement operator function with the single placement argument
-// of type std::nothrow_t.
+// Tells if the callee is one of the builtin new/delete operators, including
+// placement operators and other standard overloads.
bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
ASTContext &C) const {
if (!FD)
@@ -721,23 +724,11 @@ bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
Kind != OO_Delete && Kind != OO_Array_Delete)
return false;
- // Skip all operator new/delete methods.
- if (isa<CXXMethodDecl>(FD))
- return false;
-
- // Return true if tested operator is a standard placement nothrow operator.
- if (FD->getNumParams() == 2) {
- QualType T = FD->getParamDecl(1)->getType();
- if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
- return II->getName().equals("nothrow_t");
- }
-
- // Skip placement operators.
- if (FD->getNumParams() != 1 || FD->isVariadic())
- return false;
-
- // One of the standard new/new[]/delete/delete[] non-placement operators.
- return true;
+ // This is standard if and only if it's not defined in a user file.
+ SourceLocation L = FD->getLocation();
+ // If the header for operator delete is not included, it is still implicitly
+ // declared, but with an invalid source location. Check so we don't crash.
+ return !L.isValid() || C.getSourceManager().isInSystemHeader(L);
}
llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
@@ -1082,12 +1073,6 @@ static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
CheckerContext &C,
SVal Target) const {
- if (NE->getNumPlacementArgs())
- for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
- E = NE->placement_arg_end(); I != E; ++I)
- if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
- checkUseAfterFree(Sym, C, *I);
-
if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
return;
@@ -1098,7 +1083,7 @@ void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
// value (if any) and we don't want to loose this value. So we call
- // MallocUpdateRefState() instead of MallocMemAux() which breakes the
+ // MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
: AF_CXXNew, Target);
@@ -1109,7 +1094,7 @@ void MallocChecker::processNewAllocation(const CXXNewExpr *NE,
void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
CheckerContext &C) const {
- if (!C.getAnalysisManager().getAnalyzerOptions().mayInlineCXXAllocator())
+ if (!C.getAnalysisManager().getAnalyzerOptions().MayInlineCXXAllocator)
processNewAllocation(NE, C, C.getSVal(NE));
}
@@ -1657,13 +1642,10 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
case AF_IfNameIndex: {
if (ChecksEnabled[CK_MallocChecker])
return CK_MallocChecker;
-
- return Optional<MallocChecker::CheckKind>();
+ return None;
}
case AF_CXXNew:
- case AF_CXXNewArray:
- // FIXME: Add new CheckKind for AF_InnerBuffer.
- case AF_InnerBuffer: {
+ case AF_CXXNewArray: {
if (IsALeakCheck) {
if (ChecksEnabled[CK_NewDeleteLeaksChecker])
return CK_NewDeleteLeaksChecker;
@@ -1672,7 +1654,12 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
if (ChecksEnabled[CK_NewDeleteChecker])
return CK_NewDeleteChecker;
}
- return Optional<MallocChecker::CheckKind>();
+ return None;
+ }
+ case AF_InnerBuffer: {
+ if (ChecksEnabled[CK_InnerPointerChecker])
+ return CK_InnerPointerChecker;
+ return None;
}
case AF_None: {
llvm_unreachable("no family");
@@ -1975,7 +1962,8 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const {
if (!ChecksEnabled[CK_MallocChecker] &&
- !ChecksEnabled[CK_NewDeleteChecker])
+ !ChecksEnabled[CK_NewDeleteChecker] &&
+ !ChecksEnabled[CK_InnerPointerChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
@@ -1987,15 +1975,20 @@ void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
BT_UseFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use-after-free", categories::MemoryError));
+ AllocationFamily AF =
+ C.getState()->get<RegionState>(Sym)->getAllocationFamily();
+
auto R = llvm::make_unique<BugReport>(*BT_UseFree[*CheckKind],
- "Use of memory after it is freed", N);
+ AF == AF_InnerBuffer
+ ? "Inner pointer of container used after re/deallocation"
+ : "Use of memory after it is freed",
+ N);
R->markInteresting(Sym);
R->addRange(Range);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
- const RefState *RS = C.getState()->get<RegionState>(Sym);
- if (RS->getAllocationFamily() == AF_InnerBuffer)
+ if (AF == AF_InnerBuffer)
R->addVisitor(allocation_state::getInnerPointerBRVisitor(Sym));
C.emitReport(std::move(R));
@@ -2352,13 +2345,11 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const
{
- if (!SymReaper.hasDeadSymbols())
- return;
-
ProgramStateRef state = C.getState();
- RegionStateTy RS = state->get<RegionState>();
+ RegionStateTy OldRS = state->get<RegionState>();
RegionStateTy::Factory &F = state->get_context<RegionState>();
+ RegionStateTy RS = OldRS;
SmallVector<SymbolRef, 2> Errors;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
@@ -2366,10 +2357,18 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
Errors.push_back(I->first);
// Remove the dead symbol from the map.
RS = F.remove(RS, I->first);
-
}
}
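+  // If no tracked symbol died, the region map is unchanged and the cleanup
+  // below would be a no-op, so we can return early without adding a
+  // transition.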
+ if (RS == OldRS) {
+ // We shouldn't have touched other maps yet.
+ assert(state->get<ReallocPairs>() ==
+ C.getState()->get<ReallocPairs>());
+ assert(state->get<FreeReturnValue>() ==
+ C.getState()->get<FreeReturnValue>());
+ return;
+ }
+
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
@@ -2425,10 +2424,6 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
isCMemFunction(FD, Ctx, AF_IfNameIndex,
MemoryOperationKind::MOK_Free)))
return;
-
- if (ChecksEnabled[CK_NewDeleteChecker] &&
- isStandardNewDelete(FD, Ctx))
- return;
}
// Check if the callee of a method is deleted.
@@ -2451,7 +2446,24 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
}
}
-void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+void MallocChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ checkEscapeOnReturn(S, C);
+}
+
+// In the CFG, automatic destructors come after the return statement.
+// This callback checks for returning memory that is freed by automatic
+// destructors, as those cannot be reached in checkPreStmt().
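+// A hypothetical illustration of the pattern (names are made up):
+//   int *foo() {
+//     Wrapper W;         // destructor calls delete on W.Ptr
+//     return W.Ptr;      // the destructor runs after the return value is
+//   }                    // computed, so freed memory escapes the function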
+void MallocChecker::checkEndFunction(const ReturnStmt *S,
+ CheckerContext &C) const {
+ checkEscapeOnReturn(S, C);
+}
+
+void MallocChecker::checkEscapeOnReturn(const ReturnStmt *S,
+ CheckerContext &C) const {
+ if (!S)
+ return;
+
const Expr *E = S->getRetValue();
if (!E)
return;
@@ -2509,8 +2521,7 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
}
state =
- state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
- Regions.data() + Regions.size()).getState();
+ state->scanReachableSymbols<StopTrackingCallback>(Regions).getState();
C.addTransition(state);
}
@@ -2858,11 +2869,10 @@ static bool isReferenceCountingPointerDestructor(const CXXDestructorDecl *DD) {
}
std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
- const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
- BugReport &BR) {
+ const ExplodedNode *N, BugReporterContext &BRC, BugReport &BR) {
ProgramStateRef state = N->getState();
- ProgramStateRef statePrev = PrevN->getState();
+ ProgramStateRef statePrev = N->getFirstPred()->getState();
const RefState *RS = state->get<RegionState>(Sym);
const RefState *RSPrev = statePrev->get<RegionState>(Sym);
@@ -2918,13 +2928,22 @@ std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
case AF_CXXNewArray:
case AF_IfNameIndex:
Msg = "Memory is released";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returning; memory was released");
break;
case AF_InnerBuffer: {
- OS << "Inner pointer invalidated by call to ";
+ const MemRegion *ObjRegion =
+ allocation_state::getContainerObjRegion(statePrev, Sym);
+ const auto *TypedRegion = cast<TypedValueRegion>(ObjRegion);
+ QualType ObjTy = TypedRegion->getValueType();
+ OS << "Inner buffer of '" << ObjTy.getAsString() << "' ";
+
if (N->getLocation().getKind() == ProgramPoint::PostImplicitCallKind) {
- OS << "destructor";
+ OS << "deallocated by call to destructor";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returning; inner buffer was deallocated");
} else {
- OS << "'";
+ OS << "reallocated by call to '";
const Stmt *S = RS->getStmt();
if (const auto *MemCallE = dyn_cast<CXXMemberCallExpr>(S)) {
OS << MemCallE->getMethodDecl()->getNameAsString();
@@ -2937,6 +2956,8 @@ std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
OS << (D ? D->getNameAsString() : "unknown");
}
OS << "'";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returning; inner buffer was reallocated");
}
Msg = OS.str();
break;
@@ -2944,8 +2965,6 @@ std::shared_ptr<PathDiagnosticPiece> MallocChecker::MallocBugVisitor::VisitNode(
case AF_None:
llvm_unreachable("Unhandled allocation family!");
}
- StackHint = new StackHintGeneratorForSymbol(Sym,
- "Returning; memory was released");
// See if we're releasing memory while inlining a destructor
// (or one of its callees). This turns on various common
@@ -3071,7 +3090,7 @@ markReleased(ProgramStateRef State, SymbolRef Sym, const Expr *Origin) {
void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
registerCStringCheckerBasic(mgr);
MallocChecker *checker = mgr.registerChecker<MallocChecker>();
- checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption(
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
"Optimistic", false, checker);
checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
@@ -3087,11 +3106,23 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
}
}
+// Intended to be used in InnerPointerChecker to register the part of
+// MallocChecker connected to it.
+void ento::registerInnerPointerCheckerAux(CheckerManager &mgr) {
+ registerCStringCheckerBasic(mgr);
+ MallocChecker *checker = mgr.registerChecker<MallocChecker>();
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ "Optimistic", false, checker);
+ checker->ChecksEnabled[MallocChecker::CK_InnerPointerChecker] = true;
+ checker->CheckNames[MallocChecker::CK_InnerPointerChecker] =
+ mgr.getCurrentCheckName();
+}
+
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
registerCStringCheckerBasic(mgr); \
MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \
- checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( \
+ checker->IsOptimistic = mgr.getAnalyzerOptions().getCheckerBooleanOption( \
"Optimistic", false, checker); \
checker->ChecksEnabled[MallocChecker::CK_##name] = true; \
checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
diff --git a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index fc2ab1d6e3f7..d02ed48bceaa 100644
--- a/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -18,7 +18,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -135,9 +135,9 @@ private:
bool isIntZeroExpr(const Expr *E) const {
if (!E->getType()->isIntegralOrEnumerationType())
return false;
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, Context))
- return Result == 0;
+ return Result.Val.getInt() == 0;
return false;
}
@@ -191,8 +191,11 @@ private:
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(rhse)) {
if (BOp->getOpcode() == BO_Div) {
const Expr *denom = BOp->getRHS()->IgnoreParenImpCasts();
- if (denom->EvaluateAsInt(denomVal, Context))
+ Expr::EvalResult Result;
+ if (denom->EvaluateAsInt(Result, Context)) {
+ denomVal = Result.Val.getInt();
denomKnown = true;
+ }
const Expr *numerator = BOp->getLHS()->IgnoreParenImpCasts();
if (numerator->isEvaluatable(Context))
numeratorKnown = true;
diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 80a3fbe1a409..bb245d82bc2b 100644
--- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
diff --git a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
deleted file mode 100644
index 19c1d077afa1..000000000000
--- a/lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp
+++ /dev/null
@@ -1,525 +0,0 @@
-// MisusedMovedObjectChecker.cpp - Check use of moved-from objects. - C++ -===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This defines checker which checks for potential misuses of a moved-from
-// object. That means method calls on the object or copying it in moved-from
-// state.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ClangSACheckers.h"
-#include "clang/AST/ExprCXX.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-
-struct RegionState {
-private:
- enum Kind { Moved, Reported } K;
- RegionState(Kind InK) : K(InK) {}
-
-public:
- bool isReported() const { return K == Reported; }
- bool isMoved() const { return K == Moved; }
-
- static RegionState getReported() { return RegionState(Reported); }
- static RegionState getMoved() { return RegionState(Moved); }
-
- bool operator==(const RegionState &X) const { return K == X.K; }
- void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
-};
-
-class MisusedMovedObjectChecker
- : public Checker<check::PreCall, check::PostCall, check::EndFunction,
- check::DeadSymbols, check::RegionChanges> {
-public:
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
- void checkPreCall(const CallEvent &MC, CheckerContext &C) const;
- void checkPostCall(const CallEvent &MC, CheckerContext &C) const;
- void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
- ProgramStateRef
- checkRegionChanges(ProgramStateRef State,
- const InvalidatedSymbols *Invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext *LCtx, const CallEvent *Call) const;
- void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const override;
-
-private:
- enum MisuseKind {MK_FunCall, MK_Copy, MK_Move};
- class MovedBugVisitor : public BugReporterVisitor {
- public:
- MovedBugVisitor(const MemRegion *R) : Region(R), Found(false) {}
-
- void Profile(llvm::FoldingSetNodeID &ID) const override {
- static int X = 0;
- ID.AddPointer(&X);
- ID.AddPointer(Region);
- }
-
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
-
- private:
- // The tracked region.
- const MemRegion *Region;
- bool Found;
- };
-
- mutable std::unique_ptr<BugType> BT;
- ExplodedNode *reportBug(const MemRegion *Region, const CallEvent &Call,
- CheckerContext &C, MisuseKind MK) const;
- bool isInMoveSafeContext(const LocationContext *LC) const;
- bool isStateResetMethod(const CXXMethodDecl *MethodDec) const;
- bool isMoveSafeMethod(const CXXMethodDecl *MethodDec) const;
- const ExplodedNode *getMoveLocation(const ExplodedNode *N,
- const MemRegion *Region,
- CheckerContext &C) const;
-};
-} // end anonymous namespace
-
-REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, RegionState)
-
-// If a region is removed all of the subregions needs to be removed too.
-static ProgramStateRef removeFromState(ProgramStateRef State,
- const MemRegion *Region) {
- if (!Region)
- return State;
- for (auto &E : State->get<TrackedRegionMap>()) {
- if (E.first->isSubRegionOf(Region))
- State = State->remove<TrackedRegionMap>(E.first);
- }
- return State;
-}
-
-static bool isAnyBaseRegionReported(ProgramStateRef State,
- const MemRegion *Region) {
- for (auto &E : State->get<TrackedRegionMap>()) {
- if (Region->isSubRegionOf(E.first) && E.second.isReported())
- return true;
- }
- return false;
-}
-
-std::shared_ptr<PathDiagnosticPiece>
-MisusedMovedObjectChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) {
- // We need only the last move of the reported object's region.
- // The visitor walks the ExplodedGraph backwards.
- if (Found)
- return nullptr;
- ProgramStateRef State = N->getState();
- ProgramStateRef StatePrev = PrevN->getState();
- const RegionState *TrackedObject = State->get<TrackedRegionMap>(Region);
- const RegionState *TrackedObjectPrev =
- StatePrev->get<TrackedRegionMap>(Region);
- if (!TrackedObject)
- return nullptr;
- if (TrackedObjectPrev && TrackedObject)
- return nullptr;
-
- // Retrieve the associated statement.
- const Stmt *S = PathDiagnosticLocation::getStmt(N);
- if (!S)
- return nullptr;
- Found = true;
-
- std::string ObjectName;
- if (const auto DecReg = Region->getAs<DeclRegion>()) {
- const auto *RegionDecl = dyn_cast<NamedDecl>(DecReg->getDecl());
- ObjectName = RegionDecl->getNameAsString();
- }
- std::string InfoText;
- if (ObjectName != "")
- InfoText = "'" + ObjectName + "' became 'moved-from' here";
- else
- InfoText = "Became 'moved-from' here";
-
- // Generate the extra diagnostic.
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, InfoText, true);
-}
-
-const ExplodedNode *MisusedMovedObjectChecker::getMoveLocation(
- const ExplodedNode *N, const MemRegion *Region, CheckerContext &C) const {
- // Walk the ExplodedGraph backwards and find the first node that referred to
- // the tracked region.
- const ExplodedNode *MoveNode = N;
-
- while (N) {
- ProgramStateRef State = N->getState();
- if (!State->get<TrackedRegionMap>(Region))
- break;
- MoveNode = N;
- N = N->pred_empty() ? nullptr : *(N->pred_begin());
- }
- return MoveNode;
-}
-
-ExplodedNode *MisusedMovedObjectChecker::reportBug(const MemRegion *Region,
- const CallEvent &Call,
- CheckerContext &C,
- MisuseKind MK) const {
- if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT)
- BT.reset(new BugType(this, "Usage of a 'moved-from' object",
- "C++ move semantics"));
-
- // Uniqueing report to the same object.
- PathDiagnosticLocation LocUsedForUniqueing;
- const ExplodedNode *MoveNode = getMoveLocation(N, Region, C);
-
- if (const Stmt *MoveStmt = PathDiagnosticLocation::getStmt(MoveNode))
- LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
- MoveStmt, C.getSourceManager(), MoveNode->getLocationContext());
-
- // Creating the error message.
- std::string ErrorMessage;
- switch(MK) {
- case MK_FunCall:
- ErrorMessage = "Method call on a 'moved-from' object";
- break;
- case MK_Copy:
- ErrorMessage = "Copying a 'moved-from' object";
- break;
- case MK_Move:
- ErrorMessage = "Moving a 'moved-from' object";
- break;
- }
- if (const auto DecReg = Region->getAs<DeclRegion>()) {
- const auto *RegionDecl = dyn_cast<NamedDecl>(DecReg->getDecl());
- ErrorMessage += " '" + RegionDecl->getNameAsString() + "'";
- }
-
- auto R =
- llvm::make_unique<BugReport>(*BT, ErrorMessage, N, LocUsedForUniqueing,
- MoveNode->getLocationContext()->getDecl());
- R->addVisitor(llvm::make_unique<MovedBugVisitor>(Region));
- C.emitReport(std::move(R));
- return N;
- }
- return nullptr;
-}
-
-// Removing the function parameters' MemRegion from the state. This is needed
-// for PODs where the trivial destructor does not even created nor executed.
-void MisusedMovedObjectChecker::checkEndFunction(const ReturnStmt *RS,
- CheckerContext &C) const {
- auto State = C.getState();
- TrackedRegionMapTy Objects = State->get<TrackedRegionMap>();
- if (Objects.isEmpty())
- return;
-
- auto LC = C.getLocationContext();
-
- const auto LD = dyn_cast_or_null<FunctionDecl>(LC->getDecl());
- if (!LD)
- return;
- llvm::SmallSet<const MemRegion *, 8> InvalidRegions;
-
- for (auto Param : LD->parameters()) {
- auto Type = Param->getType().getTypePtrOrNull();
- if (!Type)
- continue;
- if (!Type->isPointerType() && !Type->isReferenceType()) {
- InvalidRegions.insert(State->getLValue(Param, LC).getAsRegion());
- }
- }
-
- if (InvalidRegions.empty())
- return;
-
- for (const auto &E : State->get<TrackedRegionMap>()) {
- if (InvalidRegions.count(E.first->getBaseRegion()))
- State = State->remove<TrackedRegionMap>(E.first);
- }
-
- C.addTransition(State);
-}
-
-void MisusedMovedObjectChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
- const auto *AFC = dyn_cast<AnyFunctionCall>(&Call);
- if (!AFC)
- return;
-
- ProgramStateRef State = C.getState();
- const auto MethodDecl = dyn_cast_or_null<CXXMethodDecl>(AFC->getDecl());
- if (!MethodDecl)
- return;
-
- const auto *ConstructorDecl = dyn_cast<CXXConstructorDecl>(MethodDecl);
-
- const auto *CC = dyn_cast_or_null<CXXConstructorCall>(&Call);
- // Check if an object became moved-from.
- // Object can become moved from after a call to move assignment operator or
- // move constructor .
- if (ConstructorDecl && !ConstructorDecl->isMoveConstructor())
- return;
-
- if (!ConstructorDecl && !MethodDecl->isMoveAssignmentOperator())
- return;
-
- const auto ArgRegion = AFC->getArgSVal(0).getAsRegion();
- if (!ArgRegion)
- return;
-
- // Skip moving the object to itself.
- if (CC && CC->getCXXThisVal().getAsRegion() == ArgRegion)
- return;
- if (const auto *IC = dyn_cast<CXXInstanceCall>(AFC))
- if (IC->getCXXThisVal().getAsRegion() == ArgRegion)
- return;
-
- const MemRegion *BaseRegion = ArgRegion->getBaseRegion();
- // Skip temp objects because of their short lifetime.
- if (BaseRegion->getAs<CXXTempObjectRegion>() ||
- AFC->getArgExpr(0)->isRValue())
- return;
- // If it has already been reported do not need to modify the state.
-
- if (State->get<TrackedRegionMap>(ArgRegion))
- return;
- // Mark object as moved-from.
- State = State->set<TrackedRegionMap>(ArgRegion, RegionState::getMoved());
- C.addTransition(State);
-}
-
-bool MisusedMovedObjectChecker::isMoveSafeMethod(
- const CXXMethodDecl *MethodDec) const {
- // We abandon the cases where bool/void/void* conversion happens.
- if (const auto *ConversionDec =
- dyn_cast_or_null<CXXConversionDecl>(MethodDec)) {
- const Type *Tp = ConversionDec->getConversionType().getTypePtrOrNull();
- if (!Tp)
- return false;
- if (Tp->isBooleanType() || Tp->isVoidType() || Tp->isVoidPointerType())
- return true;
- }
- // Function call `empty` can be skipped.
- if (MethodDec && MethodDec->getDeclName().isIdentifier() &&
- (MethodDec->getName().lower() == "empty" ||
- MethodDec->getName().lower() == "isempty"))
- return true;
-
- return false;
-}
-
-bool MisusedMovedObjectChecker::isStateResetMethod(
- const CXXMethodDecl *MethodDec) const {
- if (MethodDec && MethodDec->getDeclName().isIdentifier()) {
- std::string MethodName = MethodDec->getName().lower();
- if (MethodName == "reset" || MethodName == "clear" ||
- MethodName == "destroy")
- return true;
- }
- return false;
-}
-
-// Don't report an error inside a move related operation.
-// We assume that the programmer knows what she does.
-bool MisusedMovedObjectChecker::isInMoveSafeContext(
- const LocationContext *LC) const {
- do {
- const auto *CtxDec = LC->getDecl();
- auto *CtorDec = dyn_cast_or_null<CXXConstructorDecl>(CtxDec);
- auto *DtorDec = dyn_cast_or_null<CXXDestructorDecl>(CtxDec);
- auto *MethodDec = dyn_cast_or_null<CXXMethodDecl>(CtxDec);
- if (DtorDec || (CtorDec && CtorDec->isCopyOrMoveConstructor()) ||
- (MethodDec && MethodDec->isOverloadedOperator() &&
- MethodDec->getOverloadedOperator() == OO_Equal) ||
- isStateResetMethod(MethodDec) || isMoveSafeMethod(MethodDec))
- return true;
- } while ((LC = LC->getParent()));
- return false;
-}
-
-void MisusedMovedObjectChecker::checkPreCall(const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- const LocationContext *LC = C.getLocationContext();
- ExplodedNode *N = nullptr;
-
- // Remove the MemRegions from the map on which a ctor/dtor call or assignment
- // happened.
-
- // Checking constructor calls.
- if (const auto *CC = dyn_cast<CXXConstructorCall>(&Call)) {
- State = removeFromState(State, CC->getCXXThisVal().getAsRegion());
- auto CtorDec = CC->getDecl();
- // Check for copying a moved-from object and report the bug.
- if (CtorDec && CtorDec->isCopyOrMoveConstructor()) {
- const MemRegion *ArgRegion = CC->getArgSVal(0).getAsRegion();
- const RegionState *ArgState = State->get<TrackedRegionMap>(ArgRegion);
- if (ArgState && ArgState->isMoved()) {
- if (!isInMoveSafeContext(LC)) {
- if(CtorDec->isMoveConstructor())
- N = reportBug(ArgRegion, Call, C, MK_Move);
- else
- N = reportBug(ArgRegion, Call, C, MK_Copy);
- State = State->set<TrackedRegionMap>(ArgRegion,
- RegionState::getReported());
- }
- }
- }
- C.addTransition(State, N);
- return;
- }
-
- const auto IC = dyn_cast<CXXInstanceCall>(&Call);
- if (!IC)
- return;
- // In case of destructor call we do not track the object anymore.
- const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
- if (!ThisRegion)
- return;
-
- if (dyn_cast_or_null<CXXDestructorDecl>(Call.getDecl())) {
- State = removeFromState(State, ThisRegion);
- C.addTransition(State);
- return;
- }
-
- const auto MethodDecl = dyn_cast_or_null<CXXMethodDecl>(IC->getDecl());
- if (!MethodDecl)
- return;
- // Checking assignment operators.
- bool OperatorEq = MethodDecl->isOverloadedOperator() &&
- MethodDecl->getOverloadedOperator() == OO_Equal;
- // Remove the tracked object for every assignment operator, but report bug
- // only for move or copy assignment's argument.
- if (OperatorEq) {
- State = removeFromState(State, ThisRegion);
- if (MethodDecl->isCopyAssignmentOperator() ||
- MethodDecl->isMoveAssignmentOperator()) {
- const RegionState *ArgState =
- State->get<TrackedRegionMap>(IC->getArgSVal(0).getAsRegion());
- if (ArgState && ArgState->isMoved() && !isInMoveSafeContext(LC)) {
- const MemRegion *ArgRegion = IC->getArgSVal(0).getAsRegion();
- if(MethodDecl->isMoveAssignmentOperator())
- N = reportBug(ArgRegion, Call, C, MK_Move);
- else
- N = reportBug(ArgRegion, Call, C, MK_Copy);
- State =
- State->set<TrackedRegionMap>(ArgRegion, RegionState::getReported());
- }
- }
- C.addTransition(State, N);
- return;
- }
-
- // The remaining part is check only for method call on a moved-from object.
-
- // We want to investigate the whole object, not only sub-object of a parent
- // class in which the encountered method defined.
- while (const CXXBaseObjectRegion *BR =
- dyn_cast<CXXBaseObjectRegion>(ThisRegion))
- ThisRegion = BR->getSuperRegion();
-
- if (isMoveSafeMethod(MethodDecl))
- return;
-
- if (isStateResetMethod(MethodDecl)) {
- State = removeFromState(State, ThisRegion);
- C.addTransition(State);
- return;
- }
-
- // If it is already reported then we don't report the bug again.
- const RegionState *ThisState = State->get<TrackedRegionMap>(ThisRegion);
- if (!(ThisState && ThisState->isMoved()))
- return;
-
- // Don't report it in case if any base region is already reported
- if (isAnyBaseRegionReported(State, ThisRegion))
- return;
-
- if (isInMoveSafeContext(LC))
- return;
-
- N = reportBug(ThisRegion, Call, C, MK_FunCall);
- State = State->set<TrackedRegionMap>(ThisRegion, RegionState::getReported());
- C.addTransition(State, N);
-}
-
-void MisusedMovedObjectChecker::checkDeadSymbols(SymbolReaper &SymReaper,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
- for (TrackedRegionMapTy::value_type E : TrackedRegions) {
- const MemRegion *Region = E.first;
- bool IsRegDead = !SymReaper.isLiveRegion(Region);
-
- // Remove the dead regions from the region map.
- if (IsRegDead) {
- State = State->remove<TrackedRegionMap>(Region);
- }
- }
- C.addTransition(State);
-}
-
-ProgramStateRef MisusedMovedObjectChecker::checkRegionChanges(
- ProgramStateRef State, const InvalidatedSymbols *Invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
- const CallEvent *Call) const {
- // In case of an InstanceCall don't remove the ThisRegion from the GDM since
- // it is handled in checkPreCall and checkPostCall.
- const MemRegion *ThisRegion = nullptr;
- if (const auto *IC = dyn_cast_or_null<CXXInstanceCall>(Call)) {
- ThisRegion = IC->getCXXThisVal().getAsRegion();
- }
-
- for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
- E = ExplicitRegions.end();
- I != E; ++I) {
- const auto *Region = *I;
- if (ThisRegion != Region) {
- State = removeFromState(State, Region);
- }
- }
-
- return State;
-}
-
-void MisusedMovedObjectChecker::printState(raw_ostream &Out,
- ProgramStateRef State,
- const char *NL,
- const char *Sep) const {
-
- TrackedRegionMapTy RS = State->get<TrackedRegionMap>();
-
- if (!RS.isEmpty()) {
- Out << Sep << "Moved-from objects :" << NL;
- for (auto I: RS) {
- I.first->dumpToStream(Out);
- if (I.second.isMoved())
- Out << ": moved";
- else
- Out << ": moved and reported";
- Out << NL;
- }
- }
-}
-void ento::registerMisusedMovedObjectChecker(CheckerManager &mgr) {
- mgr.registerChecker<MisusedMovedObjectChecker>();
-}
diff --git a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index 5060b0e0a6e0..e3b24f20b0f0 100644
--- a/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -82,7 +82,9 @@ void ento::registerMmapWriteExecChecker(CheckerManager &mgr) {
MmapWriteExecChecker *Mwec =
mgr.registerChecker<MmapWriteExecChecker>();
Mwec->ProtExecOv =
- mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtExec", 0x04, Mwec);
+ mgr.getAnalyzerOptions()
+ .getCheckerIntegerOption("MmapProtExec", 0x04, Mwec);
Mwec->ProtReadOv =
- mgr.getAnalyzerOptions().getOptionAsInteger("MmapProtRead", 0x01, Mwec);
+ mgr.getAnalyzerOptions()
+ .getCheckerIntegerOption("MmapProtRead", 0x01, Mwec);
}
diff --git a/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
new file mode 100644
index 000000000000..6efa2dfbe5b4
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -0,0 +1,740 @@
+// MoveChecker.cpp - Check use of moved-from objects. - C++ ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines a checker that checks for potential misuses of a moved-from
+// object, such as calling methods on it or copying it while it is in the
+// moved-from state.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSet.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct RegionState {
+private:
+ enum Kind { Moved, Reported } K;
+ RegionState(Kind InK) : K(InK) {}
+
+public:
+ bool isReported() const { return K == Reported; }
+ bool isMoved() const { return K == Moved; }
+
+ static RegionState getReported() { return RegionState(Reported); }
+ static RegionState getMoved() { return RegionState(Moved); }
+
+ bool operator==(const RegionState &X) const { return K == X.K; }
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(K); }
+};
+} // end of anonymous namespace
+
+namespace {
+class MoveChecker
+ : public Checker<check::PreCall, check::PostCall,
+ check::DeadSymbols, check::RegionChanges> {
+public:
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &MC, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &MC, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> RequestedRegions,
+ ArrayRef<const MemRegion *> InvalidatedRegions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+
+private:
+ enum MisuseKind { MK_FunCall, MK_Copy, MK_Move, MK_Dereference };
+ enum StdObjectKind { SK_NonStd, SK_Unsafe, SK_Safe, SK_SmartPtr };
+
+ enum AggressivenessKind { // In any case, don't warn after a reset.
+ AK_Invalid = -1,
+ AK_KnownsOnly = 0, // Warn only about known move-unsafe classes.
+ AK_KnownsAndLocals = 1, // Also warn about all local objects.
+ AK_All = 2, // Warn on any use-after-move.
+ AK_NumKinds = AK_All
+ };
+
+ static bool misuseCausesCrash(MisuseKind MK) {
+ return MK == MK_Dereference;
+ }
+
+ struct ObjectKind {
+ // Is this a local variable or a local rvalue reference?
+ bool IsLocal;
+ // Is this an STL object? If so, of what kind?
+ StdObjectKind StdKind;
+ };
+
+ // STL smart pointers are automatically re-initialized to null when moved
+ // from. So we can't warn on many methods, but we can warn when it is
+ // dereferenced, which is UB even if the resulting lvalue never gets read.
+ const llvm::StringSet<> StdSmartPtrClasses = {
+ "shared_ptr",
+ "unique_ptr",
+ "weak_ptr",
+ };
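+  // A minimal sketch of the extra check this enables (illustrative only):
+  //   std::unique_ptr<int> P(new int(1));
+  //   auto Q = std::move(P); // P is reset to null by the move
+  //   *P = 2;                // dereference of a null smart pointer: UB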
+
+ // Not all of these are entirely move-safe, but they do provide *some*
+ // guarantees, and it means that somebody is using them after move
+ // in a valid manner.
+ // TODO: We can still try to identify *unsafe* use after move,
+ // like we did with smart pointers.
+ const llvm::StringSet<> StdSafeClasses = {
+ "basic_filebuf",
+ "basic_ios",
+ "future",
+ "optional",
+    "packaged_task",
+ "promise",
+ "shared_future",
+ "shared_lock",
+ "thread",
+ "unique_lock",
+ };
+
+ // Should we bother tracking the state of the object?
+ bool shouldBeTracked(ObjectKind OK) const {
+ // In non-aggressive mode, only warn on use-after-move of local variables
+ // (or local rvalue references) and of STL objects. The former is possible
+ // because local variables (or local rvalue references) are not tempting
+ // their user to re-use the storage. The latter is possible because STL
+ // objects are known to end up in a valid but unspecified state after the
+ // move and their state-reset methods are also known, which allows us to
+ // predict precisely when use-after-move is invalid.
+ // Some STL objects are known to conform to additional contracts after move,
+ // so they are not tracked. However, smart pointers specifically are tracked
+ // because we can perform extra checking over them.
+ // In aggressive mode, warn on any use-after-move because the user has
+ // intentionally asked us to completely eliminate use-after-move
+ // in his code.
+ return (Aggressiveness == AK_All) ||
+ (Aggressiveness >= AK_KnownsAndLocals && OK.IsLocal) ||
+ OK.StdKind == SK_Unsafe || OK.StdKind == SK_SmartPtr;
+ }
+
+ // Some objects only suffer from some kinds of misuses, but we need to track
+  // them anyway because we cannot know in advance what misuse we will find.
+ bool shouldWarnAbout(ObjectKind OK, MisuseKind MK) const {
+ // Additionally, only warn on smart pointers when they are dereferenced (or
+ // local or we are aggressive).
+ return shouldBeTracked(OK) &&
+ ((Aggressiveness == AK_All) ||
+ (Aggressiveness >= AK_KnownsAndLocals && OK.IsLocal) ||
+ OK.StdKind != SK_SmartPtr || MK == MK_Dereference);
+ }
+
+ // Obtains ObjectKind of an object. Because class declaration cannot always
+ // be easily obtained from the memory region, it is supplied separately.
+ ObjectKind classifyObject(const MemRegion *MR, const CXXRecordDecl *RD) const;
+
+ // Classifies the object and dumps a user-friendly description string to
+ // the stream.
+ void explainObject(llvm::raw_ostream &OS, const MemRegion *MR,
+ const CXXRecordDecl *RD, MisuseKind MK) const;
+
+ bool belongsTo(const CXXRecordDecl *RD, const llvm::StringSet<> &Set) const;
+
+ class MovedBugVisitor : public BugReporterVisitor {
+ public:
+ MovedBugVisitor(const MoveChecker &Chk, const MemRegion *R,
+ const CXXRecordDecl *RD, MisuseKind MK)
+ : Chk(Chk), Region(R), RD(RD), MK(MK), Found(false) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Region);
+ // Don't add RD because it's, in theory, uniquely determined by
+ // the region. In practice though, it's not always possible to obtain
+      // the declaration directly from the region; that's why we store it
+ // in the first place.
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ const MoveChecker &Chk;
+ // The tracked region.
+ const MemRegion *Region;
+ // The class of the tracked object.
+ const CXXRecordDecl *RD;
+ // How exactly the object was misused.
+ const MisuseKind MK;
+ bool Found;
+ };
+
+ AggressivenessKind Aggressiveness;
+
+public:
+ void setAggressiveness(StringRef Str) {
+ Aggressiveness =
+ llvm::StringSwitch<AggressivenessKind>(Str)
+ .Case("KnownsOnly", AK_KnownsOnly)
+ .Case("KnownsAndLocals", AK_KnownsAndLocals)
+ .Case("All", AK_All)
+ .Default(AK_KnownsAndLocals); // A sane default.
+ };
+
+private:
+ mutable std::unique_ptr<BugType> BT;
+
+ // Check if the given form of potential misuse of a given object
+ // should be reported. If so, get it reported. The callback from which
+ // this function was called should immediately return after the call
+ // because this function adds one or two transitions.
+ void modelUse(ProgramStateRef State, const MemRegion *Region,
+ const CXXRecordDecl *RD, MisuseKind MK,
+ CheckerContext &C) const;
+
+ // Returns the exploded node against which the report was emitted.
+ // The caller *must* add any further transitions against this node.
+ ExplodedNode *reportBug(const MemRegion *Region, const CXXRecordDecl *RD,
+ CheckerContext &C, MisuseKind MK) const;
+
+ bool isInMoveSafeContext(const LocationContext *LC) const;
+ bool isStateResetMethod(const CXXMethodDecl *MethodDec) const;
+ bool isMoveSafeMethod(const CXXMethodDecl *MethodDec) const;
+ const ExplodedNode *getMoveLocation(const ExplodedNode *N,
+ const MemRegion *Region,
+ CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, RegionState)
+
+// If a region is removed, all of its subregions need to be removed too.
+static ProgramStateRef removeFromState(ProgramStateRef State,
+ const MemRegion *Region) {
+ if (!Region)
+ return State;
+ for (auto &E : State->get<TrackedRegionMap>()) {
+ if (E.first->isSubRegionOf(Region))
+ State = State->remove<TrackedRegionMap>(E.first);
+ }
+ return State;
+}
+
+static bool isAnyBaseRegionReported(ProgramStateRef State,
+ const MemRegion *Region) {
+ for (auto &E : State->get<TrackedRegionMap>()) {
+ if (Region->isSubRegionOf(E.first) && E.second.isReported())
+ return true;
+ }
+ return false;
+}
+
+static const MemRegion *unwrapRValueReferenceIndirection(const MemRegion *MR) {
+ if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(MR)) {
+ SymbolRef Sym = SR->getSymbol();
+ if (Sym->getType()->isRValueReferenceType())
+ if (const MemRegion *OriginMR = Sym->getOriginRegion())
+ return OriginMR;
+ }
+ return MR;
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+MoveChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC, BugReport &BR) {
+ // We need only the last move of the reported object's region.
+ // The visitor walks the ExplodedGraph backwards.
+ if (Found)
+ return nullptr;
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = N->getFirstPred()->getState();
+ const RegionState *TrackedObject = State->get<TrackedRegionMap>(Region);
+ const RegionState *TrackedObjectPrev =
+ StatePrev->get<TrackedRegionMap>(Region);
+ if (!TrackedObject)
+ return nullptr;
+ if (TrackedObjectPrev && TrackedObject)
+ return nullptr;
+
+ // Retrieve the associated statement.
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ if (!S)
+ return nullptr;
+ Found = true;
+
+ SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+
+ ObjectKind OK = Chk.classifyObject(Region, RD);
+ switch (OK.StdKind) {
+ case SK_SmartPtr:
+ if (MK == MK_Dereference) {
+ OS << "Smart pointer";
+ Chk.explainObject(OS, Region, RD, MK);
+ OS << " is reset to null when moved from";
+ break;
+ }
+
+ // If it's not a dereference, we don't care if it was reset to null
+ // or that it is even a smart pointer.
+ LLVM_FALLTHROUGH;
+ case SK_NonStd:
+ case SK_Safe:
+ OS << "Object";
+ Chk.explainObject(OS, Region, RD, MK);
+ OS << " is moved";
+ break;
+ case SK_Unsafe:
+ OS << "Object";
+ Chk.explainObject(OS, Region, RD, MK);
+ OS << " is left in a valid but unspecified state after move";
+ break;
+ }
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true);
+}
+
+const ExplodedNode *MoveChecker::getMoveLocation(const ExplodedNode *N,
+ const MemRegion *Region,
+ CheckerContext &C) const {
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked region.
+ const ExplodedNode *MoveNode = N;
+
+ while (N) {
+ ProgramStateRef State = N->getState();
+ if (!State->get<TrackedRegionMap>(Region))
+ break;
+ MoveNode = N;
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ }
+ return MoveNode;
+}
+
+void MoveChecker::modelUse(ProgramStateRef State, const MemRegion *Region,
+ const CXXRecordDecl *RD, MisuseKind MK,
+ CheckerContext &C) const {
+ assert(!C.isDifferent() && "No transitions should have been made by now");
+ const RegionState *RS = State->get<TrackedRegionMap>(Region);
+ ObjectKind OK = classifyObject(Region, RD);
+
+ // Just in case: if it's not a smart pointer but it does have operator *,
+ // we shouldn't call the bug a dereference.
+ if (MK == MK_Dereference && OK.StdKind != SK_SmartPtr)
+ MK = MK_FunCall;
+
+ if (!RS || !shouldWarnAbout(OK, MK)
+ || isInMoveSafeContext(C.getLocationContext())) {
+ // Finalize changes made by the caller.
+ C.addTransition(State);
+ return;
+ }
+
+ // Don't report it in case if any base region is already reported.
+ // But still generate a sink in case of UB.
+ // And still finalize changes made by the caller.
+ if (isAnyBaseRegionReported(State, Region)) {
+ if (misuseCausesCrash(MK)) {
+ C.generateSink(State, C.getPredecessor());
+ } else {
+ C.addTransition(State);
+ }
+ return;
+ }
+
+ ExplodedNode *N = reportBug(Region, RD, C, MK);
+
+ // If the program has already crashed on this path, don't bother.
+ if (N->isSink())
+ return;
+
+ State = State->set<TrackedRegionMap>(Region, RegionState::getReported());
+ C.addTransition(State, N);
+}
+
+ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
+ const CXXRecordDecl *RD, CheckerContext &C,
+ MisuseKind MK) const {
+ if (ExplodedNode *N = misuseCausesCrash(MK) ? C.generateErrorNode()
+ : C.generateNonFatalErrorNode()) {
+
+ if (!BT)
+ BT.reset(new BugType(this, "Use-after-move",
+ "C++ move semantics"));
+
+ // Uniqueing report to the same object.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const ExplodedNode *MoveNode = getMoveLocation(N, Region, C);
+
+ if (const Stmt *MoveStmt = PathDiagnosticLocation::getStmt(MoveNode))
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ MoveStmt, C.getSourceManager(), MoveNode->getLocationContext());
+
+ // Creating the error message.
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+ switch(MK) {
+ case MK_FunCall:
+ OS << "Method called on moved-from object";
+ explainObject(OS, Region, RD, MK);
+ break;
+ case MK_Copy:
+ OS << "Moved-from object";
+ explainObject(OS, Region, RD, MK);
+ OS << " is copied";
+ break;
+ case MK_Move:
+ OS << "Moved-from object";
+ explainObject(OS, Region, RD, MK);
+ OS << " is moved";
+ break;
+ case MK_Dereference:
+ OS << "Dereference of null smart pointer";
+ explainObject(OS, Region, RD, MK);
+ break;
+ }
+
+ auto R =
+ llvm::make_unique<BugReport>(*BT, OS.str(), N, LocUsedForUniqueing,
+ MoveNode->getLocationContext()->getDecl());
+ R->addVisitor(llvm::make_unique<MovedBugVisitor>(*this, Region, RD, MK));
+ C.emitReport(std::move(R));
+ return N;
+ }
+ return nullptr;
+}
+
+void MoveChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *AFC = dyn_cast<AnyFunctionCall>(&Call);
+ if (!AFC)
+ return;
+
+ ProgramStateRef State = C.getState();
+ const auto MethodDecl = dyn_cast_or_null<CXXMethodDecl>(AFC->getDecl());
+ if (!MethodDecl)
+ return;
+
+ // Check if an object became moved-from.
+  // An object can become moved-from after a call to a move assignment
+  // operator or a move constructor.
+ const auto *ConstructorDecl = dyn_cast<CXXConstructorDecl>(MethodDecl);
+ if (ConstructorDecl && !ConstructorDecl->isMoveConstructor())
+ return;
+
+ if (!ConstructorDecl && !MethodDecl->isMoveAssignmentOperator())
+ return;
+
+ const auto ArgRegion = AFC->getArgSVal(0).getAsRegion();
+ if (!ArgRegion)
+ return;
+
+ // Skip moving the object to itself.
+ const auto *CC = dyn_cast_or_null<CXXConstructorCall>(&Call);
+ if (CC && CC->getCXXThisVal().getAsRegion() == ArgRegion)
+ return;
+
+ if (const auto *IC = dyn_cast<CXXInstanceCall>(AFC))
+ if (IC->getCXXThisVal().getAsRegion() == ArgRegion)
+ return;
+
+ const MemRegion *BaseRegion = ArgRegion->getBaseRegion();
+ // Skip temp objects because of their short lifetime.
+ if (BaseRegion->getAs<CXXTempObjectRegion>() ||
+ AFC->getArgExpr(0)->isRValue())
+ return;
+  // If it has already been reported, we do not need to modify the state.
+
+ if (State->get<TrackedRegionMap>(ArgRegion))
+ return;
+
+ const CXXRecordDecl *RD = MethodDecl->getParent();
+ ObjectKind OK = classifyObject(ArgRegion, RD);
+ if (shouldBeTracked(OK)) {
+ // Mark object as moved-from.
+ State = State->set<TrackedRegionMap>(ArgRegion, RegionState::getMoved());
+ C.addTransition(State);
+ return;
+ }
+ assert(!C.isDifferent() && "Should not have made transitions on this path!");
+}
+
+bool MoveChecker::isMoveSafeMethod(const CXXMethodDecl *MethodDec) const {
+ // We abandon the cases where bool/void/void* conversion happens.
+ if (const auto *ConversionDec =
+ dyn_cast_or_null<CXXConversionDecl>(MethodDec)) {
+ const Type *Tp = ConversionDec->getConversionType().getTypePtrOrNull();
+ if (!Tp)
+ return false;
+ if (Tp->isBooleanType() || Tp->isVoidType() || Tp->isVoidPointerType())
+ return true;
+ }
+ // Function call `empty` can be skipped.
+ return (MethodDec && MethodDec->getDeclName().isIdentifier() &&
+ (MethodDec->getName().lower() == "empty" ||
+ MethodDec->getName().lower() == "isempty"));
+}
+
+bool MoveChecker::isStateResetMethod(const CXXMethodDecl *MethodDec) const {
+ if (!MethodDec)
+ return false;
+ if (MethodDec->hasAttr<ReinitializesAttr>())
+ return true;
+ if (MethodDec->getDeclName().isIdentifier()) {
+ std::string MethodName = MethodDec->getName().lower();
+    // TODO: Some of these methods (e.g., resize) are not always resetting
+ // the state, so we should consider looking at the arguments.
+ if (MethodName == "reset" || MethodName == "clear" ||
+ MethodName == "destroy" || MethodName == "resize" ||
+ MethodName == "shrink")
+ return true;
+ }
+ return false;
+}
+
+// Don't report an error inside a move related operation.
+// We assume that the programmer knows what she does.
+bool MoveChecker::isInMoveSafeContext(const LocationContext *LC) const {
+ do {
+ const auto *CtxDec = LC->getDecl();
+ auto *CtorDec = dyn_cast_or_null<CXXConstructorDecl>(CtxDec);
+ auto *DtorDec = dyn_cast_or_null<CXXDestructorDecl>(CtxDec);
+ auto *MethodDec = dyn_cast_or_null<CXXMethodDecl>(CtxDec);
+ if (DtorDec || (CtorDec && CtorDec->isCopyOrMoveConstructor()) ||
+ (MethodDec && MethodDec->isOverloadedOperator() &&
+ MethodDec->getOverloadedOperator() == OO_Equal) ||
+ isStateResetMethod(MethodDec) || isMoveSafeMethod(MethodDec))
+ return true;
+ } while ((LC = LC->getParent()));
+ return false;
+}
+
+bool MoveChecker::belongsTo(const CXXRecordDecl *RD,
+ const llvm::StringSet<> &Set) const {
+ const IdentifierInfo *II = RD->getIdentifier();
+ return II && Set.count(II->getName());
+}
+
+MoveChecker::ObjectKind
+MoveChecker::classifyObject(const MemRegion *MR,
+ const CXXRecordDecl *RD) const {
+ // Local variables and local rvalue references are classified as "Local".
+ // For the purposes of this checker, we classify move-safe STL types
+ // as not-"STL" types, because that's how the checker treats them.
+ MR = unwrapRValueReferenceIndirection(MR);
+ bool IsLocal =
+ MR && isa<VarRegion>(MR) && isa<StackSpaceRegion>(MR->getMemorySpace());
+
+ if (!RD || !RD->getDeclContext()->isStdNamespace())
+ return { IsLocal, SK_NonStd };
+
+ if (belongsTo(RD, StdSmartPtrClasses))
+ return { IsLocal, SK_SmartPtr };
+
+ if (belongsTo(RD, StdSafeClasses))
+ return { IsLocal, SK_Safe };
+
+ return { IsLocal, SK_Unsafe };
+}
+
+void MoveChecker::explainObject(llvm::raw_ostream &OS, const MemRegion *MR,
+ const CXXRecordDecl *RD, MisuseKind MK) const {
+ // We may need a leading space every time we actually explain anything,
+ // and we never know if we are to explain anything until we try.
+ if (const auto DR =
+ dyn_cast_or_null<DeclRegion>(unwrapRValueReferenceIndirection(MR))) {
+ const auto *RegionDecl = cast<NamedDecl>(DR->getDecl());
+ OS << " '" << RegionDecl->getNameAsString() << "'";
+ }
+
+ ObjectKind OK = classifyObject(MR, RD);
+ switch (OK.StdKind) {
+ case SK_NonStd:
+ case SK_Safe:
+ break;
+ case SK_SmartPtr:
+ if (MK != MK_Dereference)
+ break;
+
+ // We only care about the type if it's a dereference.
+ LLVM_FALLTHROUGH;
+ case SK_Unsafe:
+ OS << " of type '" << RD->getQualifiedNameAsString() << "'";
+ break;
+ };
+}
+
+void MoveChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Remove the MemRegions from the map on which a ctor/dtor call or assignment
+ // happened.
+
+ // Checking constructor calls.
+ if (const auto *CC = dyn_cast<CXXConstructorCall>(&Call)) {
+ State = removeFromState(State, CC->getCXXThisVal().getAsRegion());
+ auto CtorDec = CC->getDecl();
+ // Check for copying a moved-from object and report the bug.
+ if (CtorDec && CtorDec->isCopyOrMoveConstructor()) {
+ const MemRegion *ArgRegion = CC->getArgSVal(0).getAsRegion();
+ const CXXRecordDecl *RD = CtorDec->getParent();
+ MisuseKind MK = CtorDec->isMoveConstructor() ? MK_Move : MK_Copy;
+ modelUse(State, ArgRegion, RD, MK, C);
+ return;
+ }
+ }
+
+ const auto IC = dyn_cast<CXXInstanceCall>(&Call);
+ if (!IC)
+ return;
+
+ // Calling a destructor on a moved object is fine.
+ if (isa<CXXDestructorCall>(IC))
+ return;
+
+ const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
+ if (!ThisRegion)
+ return;
+
+ // The remaining part is check only for method call on a moved-from object.
+ const auto MethodDecl = dyn_cast_or_null<CXXMethodDecl>(IC->getDecl());
+ if (!MethodDecl)
+ return;
+
+  // We want to investigate the whole object, not only the sub-object of the
+  // parent class in which the encountered method is defined.
+ ThisRegion = ThisRegion->getMostDerivedObjectRegion();
+
+ if (isStateResetMethod(MethodDecl)) {
+ State = removeFromState(State, ThisRegion);
+ C.addTransition(State);
+ return;
+ }
+
+ if (isMoveSafeMethod(MethodDecl))
+ return;
+
+ // Store class declaration as well, for bug reporting purposes.
+ const CXXRecordDecl *RD = MethodDecl->getParent();
+
+ if (MethodDecl->isOverloadedOperator()) {
+ OverloadedOperatorKind OOK = MethodDecl->getOverloadedOperator();
+
+ if (OOK == OO_Equal) {
+ // Remove the tracked object for every assignment operator, but report bug
+ // only for move or copy assignment's argument.
+ State = removeFromState(State, ThisRegion);
+
+ if (MethodDecl->isCopyAssignmentOperator() ||
+ MethodDecl->isMoveAssignmentOperator()) {
+ const MemRegion *ArgRegion = IC->getArgSVal(0).getAsRegion();
+ MisuseKind MK =
+ MethodDecl->isMoveAssignmentOperator() ? MK_Move : MK_Copy;
+ modelUse(State, ArgRegion, RD, MK, C);
+ return;
+ }
+ C.addTransition(State);
+ return;
+ }
+
+ if (OOK == OO_Star || OOK == OO_Arrow) {
+ modelUse(State, ThisRegion, RD, MK_Dereference, C);
+ return;
+ }
+ }
+
+ modelUse(State, ThisRegion, RD, MK_FunCall, C);
+}
+
+void MoveChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
+ for (TrackedRegionMapTy::value_type E : TrackedRegions) {
+ const MemRegion *Region = E.first;
+ bool IsRegDead = !SymReaper.isLiveRegion(Region);
+
+ // Remove the dead regions from the region map.
+ if (IsRegDead) {
+ State = State->remove<TrackedRegionMap>(Region);
+ }
+ }
+ C.addTransition(State);
+}
+
+ProgramStateRef MoveChecker::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> RequestedRegions,
+ ArrayRef<const MemRegion *> InvalidatedRegions,
+ const LocationContext *LCtx, const CallEvent *Call) const {
+ if (Call) {
+ // Relax invalidation upon function calls: only invalidate parameters
+ // that are passed directly via non-const pointers or non-const references
+ // or rvalue references.
+ // In case of an InstanceCall don't invalidate the this-region since
+ // it is fully handled in checkPreCall and checkPostCall.
+ const MemRegion *ThisRegion = nullptr;
+ if (const auto *IC = dyn_cast<CXXInstanceCall>(Call))
+ ThisRegion = IC->getCXXThisVal().getAsRegion();
+
+ // Requested ("explicit") regions are the regions passed into the call
+ // directly, but not all of them end up being invalidated.
+ // But when they do, they appear in the InvalidatedRegions array as well.
+ for (const auto *Region : RequestedRegions) {
+ if (ThisRegion != Region) {
+ if (llvm::find(InvalidatedRegions, Region) !=
+ std::end(InvalidatedRegions)) {
+ State = removeFromState(State, Region);
+ }
+ }
+ }
+ } else {
+ // For invalidations that aren't caused by calls, assume nothing. In
+ // particular, direct write into an object's field invalidates the status.
+ for (const auto *Region : InvalidatedRegions)
+ State = removeFromState(State, Region->getBaseRegion());
+ }
+
+ return State;
+}
+
+void MoveChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ TrackedRegionMapTy RS = State->get<TrackedRegionMap>();
+
+ if (!RS.isEmpty()) {
+ Out << Sep << "Moved-from objects :" << NL;
+ for (auto I: RS) {
+ I.first->dumpToStream(Out);
+ if (I.second.isMoved())
+ Out << ": moved";
+ else
+ Out << ": moved and reported";
+ Out << NL;
+ }
+ }
+}
+void ento::registerMoveChecker(CheckerManager &mgr) {
+ MoveChecker *chk = mgr.registerChecker<MoveChecker>();
+ chk->setAggressiveness(
+ mgr.getAnalyzerOptions().getCheckerStringOption("WarnOn", "", chk));
+}
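+// Usage sketch: the "WarnOn" option above accepts "KnownsOnly",
+// "KnownsAndLocals" or "All". Assuming the checker is registered in the
+// registry under a name such as cplusplus.Move (the exact name is not part
+// of this file), it could be configured as:
+//   clang --analyze -Xclang -analyzer-config \
+//     -Xclang cplusplus.Move:WarnOn=KnownsAndLocals test.cpp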
diff --git a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index 0e7894788c87..4ed1b25cb09e 100644
--- a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 2bd68b625c1f..06c43c6b9470 100644
--- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
diff --git a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index 8a5c769b6b50..83d4b5b0758b 100644
--- a/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
-#include "SelectorExtras.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
+#include "clang/Analysis/SelectorExtras.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
diff --git a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 01d2c0491b85..3c4363b6850e 100644
--- a/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -192,7 +192,7 @@ NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
*BTAttrNonNull,
"Null pointer passed as an argument to a 'nonnull' parameter", ErrorNode);
if (ArgE)
- bugreporter::trackNullOrUndefValue(ErrorNode, ArgE, *R);
+ bugreporter::trackExpressionValue(ErrorNode, ArgE, *R);
return R;
}
@@ -208,9 +208,7 @@ std::unique_ptr<BugReport> NonNullParamChecker::genReportReferenceToNullPointer(
const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
if (!ArgEDeref)
ArgEDeref = ArgE;
- bugreporter::trackNullOrUndefValue(ErrorNode,
- ArgEDeref,
- *R);
+ bugreporter::trackExpressionValue(ErrorNode, ArgEDeref, *R);
}
return R;
diff --git a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 6f3180eb839a..ce9e950aa9ba 100644
--- a/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -21,7 +21,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index 7d1ca61c97a9..e535d1ae27ac 100644
--- a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -25,7 +25,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -139,7 +139,6 @@ private:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -175,7 +174,8 @@ private:
if (Error == ErrorKind::NilAssignedToNonnull ||
Error == ErrorKind::NilPassedToNonnull ||
Error == ErrorKind::NilReturnedToNonnull)
- bugreporter::trackNullOrUndefValue(N, ValueExpr, *R);
+ if (const auto *Ex = dyn_cast<Expr>(ValueExpr))
+ bugreporter::trackExpressionValue(N, Ex, *R);
}
BR.emitReport(std::move(R));
}
@@ -185,7 +185,7 @@ private:
const SymbolicRegion *getTrackRegion(SVal Val,
bool CheckSuperRegion = false) const;
- /// Returns true if the call is diagnosable in the currrent analyzer
+ /// Returns true if the call is diagnosable in the current analyzer
/// configuration.
bool isDiagnosableCall(const CallEvent &Call) const {
if (NoDiagnoseCallsToSystemHeaders && Call.isInSystemHeader())
@@ -293,11 +293,10 @@ NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
std::shared_ptr<PathDiagnosticPiece>
NullabilityChecker::NullabilityBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
ProgramStateRef State = N->getState();
- ProgramStateRef StatePrev = PrevN->getState();
+ ProgramStateRef StatePrev = N->getFirstPred()->getState();
const NullabilityState *TrackedNullab = State->get<NullabilityMap>(Region);
const NullabilityState *TrackedNullabPrev =
@@ -311,7 +310,7 @@ NullabilityChecker::NullabilityBugVisitor::VisitNode(const ExplodedNode *N,
// Retrieve the associated statement.
const Stmt *S = TrackedNullab->getNullabilitySource();
- if (!S || S->getLocStart().isInvalid()) {
+ if (!S || S->getBeginLoc().isInvalid()) {
S = PathDiagnosticLocation::getStmt(N);
}
@@ -330,8 +329,8 @@ NullabilityChecker::NullabilityBugVisitor::VisitNode(const ExplodedNode *N,
nullptr);
}
-/// Returns true when the value stored at the given location is null
-/// and the passed in type is nonnnull.
+/// Returns true when the value stored at the given location has been
+/// constrained to null after being passed through an object of nonnull type.
static bool checkValueAtLValForInvariantViolation(ProgramStateRef State,
SVal LV, QualType T) {
if (getNullabilityAnnotation(T) != Nullability::Nonnull)
@@ -341,9 +340,14 @@ static bool checkValueAtLValForInvariantViolation(ProgramStateRef State,
if (!RegionVal)
return false;
- auto StoredVal =
- State->getSVal(RegionVal->getRegion()).getAs<DefinedOrUnknownSVal>();
- if (!StoredVal)
+ // If the value was constrained to null *after* it was passed through that
+ // location, it could not have been a concrete pointer *when* it was passed.
+ // In that case we would have handled the situation when the value was
+ // bound to that location, by emitting (or not emitting) a report.
+ // Therefore we are only interested in symbolic regions that can be either
+ // null or non-null depending on the value of their respective symbol.
+ auto StoredVal = State->getSVal(*RegionVal).getAs<loc::MemRegionVal>();
+ if (!StoredVal || !isa<SymbolicRegion>(StoredVal->getRegion()))
return false;
if (getNullConstraint(*StoredVal, State) == NullConstraint::IsNull)
@@ -447,9 +451,6 @@ void NullabilityChecker::reportBugIfInvariantHolds(StringRef Msg,
/// Cleaning up the program state.
void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
- if (!SR.hasDeadSymbols())
- return;
-
ProgramStateRef State = C.getState();
NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
for (NullabilityMapTy::iterator I = Nullabilities.begin(),
@@ -766,7 +767,7 @@ void NullabilityChecker::checkPostCall(const CallEvent &Call,
// CG headers are misannotated. Do not warn for symbols that are the results
// of CG calls.
const SourceManager &SM = C.getSourceManager();
- StringRef FilePath = SM.getFilename(SM.getSpellingLoc(Decl->getLocStart()));
+ StringRef FilePath = SM.getFilename(SM.getSpellingLoc(Decl->getBeginLoc()));
if (llvm::sys::path::filename(FilePath).startswith("CG")) {
State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
C.addTransition(State);
@@ -1174,10 +1175,15 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
NullabilityMapTy B = State->get<NullabilityMap>();
+ if (State->get<InvariantViolated>())
+ Out << Sep << NL
+ << "Nullability invariant was violated, warnings suppressed." << NL;
+
if (B.isEmpty())
return;
- Out << Sep << NL;
+ if (!State->get<InvariantViolated>())
+ Out << Sep << NL;
for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
Out << I->first << " : ";
@@ -1194,7 +1200,7 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
checker->NeedTracking = checker->NeedTracking || trackingRequired; \
checker->NoDiagnoseCallsToSystemHeaders = \
checker->NoDiagnoseCallsToSystemHeaders || \
- mgr.getAnalyzerOptions().getBooleanOption( \
+ mgr.getAnalyzerOptions().getCheckerBooleanOption( \
"NoDiagnoseCallsToSystemHeaders", false, checker, true); \
}
diff --git a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index d1749cfdbe27..4e3a7205f1f4 100644
--- a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -26,7 +26,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -77,7 +77,7 @@ void Callback::run(const MatchFinder::MatchResult &Result) {
// to zero literals in non-pedantic mode.
// FIXME: Introduce an AST matcher to implement the macro-related logic?
bool MacroIndicatesWeShouldSkipTheCheck = false;
- SourceLocation Loc = CheckIfNull->getLocStart();
+ SourceLocation Loc = CheckIfNull->getBeginLoc();
if (Loc.isMacroID()) {
StringRef MacroName = Lexer::getImmediateMacroName(
Loc, ACtx.getSourceManager(), ACtx.getLangOpts());
@@ -87,9 +87,10 @@ void Callback::run(const MatchFinder::MatchResult &Result) {
MacroIndicatesWeShouldSkipTheCheck = true;
}
if (!MacroIndicatesWeShouldSkipTheCheck) {
- llvm::APSInt Result;
+ Expr::EvalResult EVResult;
if (CheckIfNull->IgnoreParenCasts()->EvaluateAsInt(
- Result, ACtx, Expr::SE_AllowSideEffects)) {
+ EVResult, ACtx, Expr::SE_AllowSideEffects)) {
+ llvm::APSInt Result = EVResult.Val.getInt();
if (Result == 0) {
if (!C->Pedantic)
return;
@@ -346,5 +347,5 @@ void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
NumberObjectConversionChecker *Chk =
Mgr.registerChecker<NumberObjectConversionChecker>();
Chk->Pedantic =
- Mgr.getAnalyzerOptions().getBooleanOption("Pedantic", false, Chk);
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption("Pedantic", false, Chk);
}
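For reference, a minimal sketch (not part of the patch) of the Expr::EvalResult
calling convention adopted in this file's diff: the evaluated integer is now read
out of EVResult.Val instead of being returned through an llvm::APSInt out-parameter.

// Hypothetical helper, assuming a clang Expr and ASTContext are in scope.
#include "clang/AST/Expr.h"

static bool evaluatesToZero(const clang::Expr *E, const clang::ASTContext &Ctx) {
  clang::Expr::EvalResult EVResult;
  if (!E->IgnoreParenCasts()->EvaluateAsInt(EVResult, Ctx,
                                            clang::Expr::SE_AllowSideEffects))
    return false;                        // not a constant integer expression
  return EVResult.Val.getInt() == 0;     // the APSInt now lives in EVResult.Val
}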
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index b7339fe79f69..185b57575cb0 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -49,7 +49,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"for @synchronized"));
auto report =
llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
}
return;
@@ -73,7 +73,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
"(no synchronization will occur)"));
auto report =
llvm::make_unique<BugReport>(*BT_null, BT_null->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
return;
@@ -89,6 +89,6 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
}
void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
- if (mgr.getLangOpts().ObjC2)
+ if (mgr.getLangOpts().ObjC)
mgr.registerChecker<ObjCAtSyncChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index 81bcda51b8f8..0424958f8e65 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -27,7 +27,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index e4737fcee7fb..34ce47823d51 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -11,7 +11,7 @@
// 'CFDictionary', 'CFSet' APIs.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/TargetInfo.h"
diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index fb05ca630b45..1c8c0d8dedda 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -16,7 +16,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -57,6 +57,9 @@ public:
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const;
+
+ void printState(raw_ostream &OS, ProgramStateRef State,
+ const char *NL, const char *Sep) const;
};
} // end anonymous namespace
@@ -144,6 +147,8 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
initBugType();
auto R = llvm::make_unique<BugReport>(*BT, "Index is out of bounds", N);
R->addRange(IdxExpr->getSourceRange());
+ bugreporter::trackExpressionValue(N, IdxExpr, *R,
+ /*EnableNullFPSuppression=*/false);
C.emitReport(std::move(R));
return;
}
@@ -166,6 +171,18 @@ ObjCContainersChecker::checkPointerEscape(ProgramStateRef State,
return State;
}
+void ObjCContainersChecker::printState(raw_ostream &OS, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ ArraySizeMapTy Map = State->get<ArraySizeMap>();
+ if (Map.isEmpty())
+ return;
+
+ OS << Sep << "ObjC container sizes :" << NL;
+ for (auto I : Map) {
+ OS << I.first << " : " << I.second << NL;
+ }
+}
+
/// Register checker.
void ento::registerObjCContainersChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersChecker>();
diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index d01c6ae6e093..d383302b2790 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
diff --git a/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
index dfd2c9afe7fb..018d3fcfceb9 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 629520437369..efa804220765 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -36,7 +36,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index fcba3b33f3e0..9058784dd345 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -72,7 +72,6 @@ public:
Satisfied(false) {}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -247,8 +246,7 @@ ObjCSuperDeallocChecker::isSuperDeallocMessage(const ObjCMethodCall &M) const {
std::shared_ptr<PathDiagnosticPiece>
SuperDeallocBRVisitor::VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
- BugReporterContext &BRC, BugReport &BR) {
+ BugReporterContext &BRC, BugReport &) {
if (Satisfied)
return nullptr;
@@ -257,7 +255,8 @@ SuperDeallocBRVisitor::VisitNode(const ExplodedNode *Succ,
bool CalledNow =
Succ->getState()->contains<CalledSuperDealloc>(ReceiverSymbol);
bool CalledBefore =
- Pred->getState()->contains<CalledSuperDealloc>(ReceiverSymbol);
+ Succ->getFirstPred()->getState()->contains<CalledSuperDealloc>(
+ ReceiverSymbol);
// Is Succ the node on which the analyzer noted that [super dealloc] was
// called on ReceiverSymbol?
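A minimal sketch (not part of the patch) of the BugReporterVisitor shape after this
change: VisitNode no longer receives the predecessor node, so a visitor that needs
the previous program state fetches it via N->getFirstPred(), as the hunk above does.

// Hypothetical visitor; the class name and logic are illustrative only.
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"

using namespace clang;
using namespace ento;

class ExampleVisitor final : public BugReporterVisitor {
public:
  void Profile(llvm::FoldingSetNodeID &ID) const override {
    static int Tag;
    ID.AddPointer(&Tag);
  }

  std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
                                                 BugReporterContext &BRC,
                                                 BugReport &BR) override {
    ProgramStateRef State = N->getState();
    ProgramStateRef StatePrev = N->getFirstPred()->getState();
    // Compare State with StatePrev and return a piece only at the node where
    // the tracked fact first changed; otherwise stay silent.
    (void)State; (void)StatePrev; (void)BRC; (void)BR;
    return nullptr;
  }
};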
diff --git a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index c6da37eac0c0..7f7b45316087 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
@@ -98,7 +98,7 @@ static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
SourceManager &SM) {
for (const auto *I : C->decls())
if (const auto *FD = dyn_cast<FunctionDecl>(I)) {
- SourceLocation L = FD->getLocStart();
+ SourceLocation L = FD->getBeginLoc();
if (SM.getFileID(L) == FID)
Scan(M, FD->getBody());
}
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index f69f3492edb1..211db392bf71 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
@@ -41,7 +41,8 @@ public:
BugReporter &BRArg) const {
BR = &BRArg;
AllowedPad =
- MGR.getAnalyzerOptions().getOptionAsInteger("AllowedPad", 24, this);
+ MGR.getAnalyzerOptions()
+ .getCheckerIntegerOption("AllowedPad", 24, this);
assert(AllowedPad >= 0 && "AllowedPad option should be non-negative");
// The calls to checkAST* from AnalysisConsumer don't
@@ -75,6 +76,20 @@ public:
if (shouldSkipDecl(RD))
return;
+ // TODO: Figure out why we are going through declarations and not only
+ // definitions.
+ if (!(RD = RD->getDefinition()))
+ return;
+
+ // This is the simplest correct case: a class with no fields and one base
+ // class. Other cases are more complicated because of how the base classes
+ // & fields might interact, so we don't bother dealing with them.
+ // TODO: Support other combinations of base classes and fields.
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->field_empty() && CXXRD->getNumBases() == 1)
+ return visitRecord(CXXRD->bases().begin()->getType()->getAsRecordDecl(),
+ PadMultiplier);
+
auto &ASTContext = RD->getASTContext();
const ASTRecordLayout &RL = ASTContext.getASTRecordLayout(RD);
assert(llvm::isPowerOf2_64(RL.getAlignment().getQuantity()));
@@ -112,12 +127,15 @@ public:
if (RT == nullptr)
return;
- // TODO: Recurse into the fields and base classes to see if any
- // of those have excess padding.
+ // TODO: Recurse into the fields to see if they have excess padding.
visitRecord(RT->getDecl(), Elts);
}
bool shouldSkipDecl(const RecordDecl *RD) const {
+ // TODO: Figure out why we are going through declarations and not only
+ // definitions.
+ if (!(RD = RD->getDefinition()))
+ return true;
auto Location = RD->getLocation();
// If the construct doesn't have a source file, then it's not something
// we want to diagnose.
@@ -132,13 +150,14 @@ public:
// Not going to attempt to optimize unions.
if (RD->isUnion())
return true;
- // How do you reorder fields if you haven't got any?
- if (RD->field_empty())
- return true;
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
// Tail padding with base classes ends up being very complicated.
- // We will skip objects with base classes for now.
- if (CXXRD->getNumBases() != 0)
+ // We will skip objects with base classes for now, unless they do not
+ // have fields.
+ // TODO: Handle more base class scenarios.
+ if (!CXXRD->field_empty() && CXXRD->getNumBases() != 0)
+ return true;
+ if (CXXRD->field_empty() && CXXRD->getNumBases() != 1)
return true;
// Virtual bases are complicated, skipping those for now.
if (CXXRD->getNumVBases() != 0)
@@ -150,6 +169,10 @@ public:
if (CXXRD->getTypeForDecl()->isInstantiationDependentType())
return true;
}
+ // How do you reorder fields if you haven't got any?
+ else if (RD->field_empty())
+ return true;
+
auto IsTrickyField = [](const FieldDecl *FD) -> bool {
// Bitfield layout is hard.
if (FD->isBitField())
@@ -237,7 +260,7 @@ public:
};
std::transform(RD->field_begin(), RD->field_end(),
std::back_inserter(Fields), GatherSizesAndAlignments);
- llvm::sort(Fields.begin(), Fields.end());
+ llvm::sort(Fields);
// This lets us skip over vptrs and non-virtual bases,
// so that we can just worry about the fields in our object.
// Note that this does cause us to miss some cases where we
@@ -323,7 +346,7 @@ public:
BR->emitReport(std::move(Report));
}
};
-}
+} // namespace
void ento::registerPaddingChecker(CheckerManager &Mgr) {
Mgr.registerChecker<PaddingChecker>();
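An illustrative (hypothetical) pair of records for the new case handled above: a
class with no fields and exactly one base is no longer skipped, and the checker
now analyzes the base's layout through it instead.

// Hypothetical example types, not taken from the patch or its tests.
struct Base {
  char a;       // 1 byte
  long long b;  // 8 bytes; typically preceded by 7 bytes of padding
  char c;       // 1 byte; typically followed by 7 bytes of tail padding
};
struct Wrapper : Base {};  // field-less, single base: visitRecord now recurses
                           // into Base instead of returning early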
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index 63f82b275ba2..de3a16ebc729 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -112,7 +112,7 @@ PointerArithChecker::getPointedRegion(const MemRegion *Region,
}
/// Checks whether a region is the part of an array.
-/// In case there is a dericed to base cast above the array element, the
+/// In case there is a derived to base cast above the array element, the
/// Polymorphic output value is set to true. AKind output value is set to the
/// allocation kind of the inspected region.
const MemRegion *PointerArithChecker::getArrayRegion(const MemRegion *Region,
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 9aa5348e4c34..41490e45f241 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 10ab952e069b..66cc37278809 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
deleted file mode 100644
index 9c85c0983723..000000000000
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ /dev/null
@@ -1,4156 +0,0 @@
-//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the methods for RetainCountChecker, which implements
-// a reference count checker for Core Foundation and Cocoa on (Mac OS X).
-//
-//===----------------------------------------------------------------------===//
-
-#include "AllocationDiagnostics.h"
-#include "ClangSACheckers.h"
-#include "SelectorExtras.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/ParentMap.h"
-#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include <cstdarg>
-#include <utility>
-
-using namespace clang;
-using namespace ento;
-using namespace objc_retain;
-using llvm::StrInStrNoCase;
-
-//===----------------------------------------------------------------------===//
-// Adapters for FoldingSet.
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-template <> struct FoldingSetTrait<ArgEffect> {
-static inline void Profile(const ArgEffect X, FoldingSetNodeID &ID) {
- ID.AddInteger((unsigned) X);
-}
-};
-template <> struct FoldingSetTrait<RetEffect> {
- static inline void Profile(const RetEffect &X, FoldingSetNodeID &ID) {
- ID.AddInteger((unsigned) X.getKind());
- ID.AddInteger((unsigned) X.getObjKind());
-}
-};
-} // end llvm namespace
-
-//===----------------------------------------------------------------------===//
-// Reference-counting logic (typestate + counts).
-//===----------------------------------------------------------------------===//
-
-/// ArgEffects summarizes the effects of a function/method call on all of
-/// its arguments.
-typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
-
-namespace {
-class RefVal {
-public:
- enum Kind {
- Owned = 0, // Owning reference.
- NotOwned, // Reference is not owned by still valid (not freed).
- Released, // Object has been released.
- ReturnedOwned, // Returned object passes ownership to caller.
- ReturnedNotOwned, // Return object does not pass ownership to caller.
- ERROR_START,
- ErrorDeallocNotOwned, // -dealloc called on non-owned object.
- ErrorDeallocGC, // Calling -dealloc with GC enabled.
- ErrorUseAfterRelease, // Object used after released.
- ErrorReleaseNotOwned, // Release of an object that was not owned.
- ERROR_LEAK_START,
- ErrorLeak, // A memory leak due to excessive reference counts.
- ErrorLeakReturned, // A memory leak due to the returning method not having
- // the correct naming conventions.
- ErrorGCLeakReturned,
- ErrorOverAutorelease,
- ErrorReturnedNotOwned
- };
-
- /// Tracks how an object referenced by an ivar has been used.
- ///
- /// This accounts for us not knowing if an arbitrary ivar is supposed to be
- /// stored at +0 or +1.
- enum class IvarAccessHistory {
- None,
- AccessedDirectly,
- ReleasedAfterDirectAccess
- };
-
-private:
- /// The number of outstanding retains.
- unsigned Cnt;
- /// The number of outstanding autoreleases.
- unsigned ACnt;
- /// The (static) type of the object at the time we started tracking it.
- QualType T;
-
- /// The current state of the object.
- ///
- /// See the RefVal::Kind enum for possible values.
- unsigned RawKind : 5;
-
- /// The kind of object being tracked (CF or ObjC), if known.
- ///
- /// See the RetEffect::ObjKind enum for possible values.
- unsigned RawObjectKind : 2;
-
- /// True if the current state and/or retain count may turn out to not be the
- /// best possible approximation of the reference counting state.
- ///
- /// If true, the checker may decide to throw away ("override") this state
- /// in favor of something else when it sees the object being used in new ways.
- ///
- /// This setting should not be propagated to state derived from this state.
- /// Once we start deriving new states, it would be inconsistent to override
- /// them.
- unsigned RawIvarAccessHistory : 2;
-
- RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t,
- IvarAccessHistory IvarAccess)
- : Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)),
- RawObjectKind(static_cast<unsigned>(o)),
- RawIvarAccessHistory(static_cast<unsigned>(IvarAccess)) {
- assert(getKind() == k && "not enough bits for the kind");
- assert(getObjKind() == o && "not enough bits for the object kind");
- assert(getIvarAccessHistory() == IvarAccess && "not enough bits");
- }
-
-public:
- Kind getKind() const { return static_cast<Kind>(RawKind); }
-
- RetEffect::ObjKind getObjKind() const {
- return static_cast<RetEffect::ObjKind>(RawObjectKind);
- }
-
- unsigned getCount() const { return Cnt; }
- unsigned getAutoreleaseCount() const { return ACnt; }
- unsigned getCombinedCounts() const { return Cnt + ACnt; }
- void clearCounts() {
- Cnt = 0;
- ACnt = 0;
- }
- void setCount(unsigned i) {
- Cnt = i;
- }
- void setAutoreleaseCount(unsigned i) {
- ACnt = i;
- }
-
- QualType getType() const { return T; }
-
- /// Returns what the analyzer knows about direct accesses to a particular
- /// instance variable.
- ///
- /// If the object with this refcount wasn't originally from an Objective-C
- /// ivar region, this should always return IvarAccessHistory::None.
- IvarAccessHistory getIvarAccessHistory() const {
- return static_cast<IvarAccessHistory>(RawIvarAccessHistory);
- }
-
- bool isOwned() const {
- return getKind() == Owned;
- }
-
- bool isNotOwned() const {
- return getKind() == NotOwned;
- }
-
- bool isReturnedOwned() const {
- return getKind() == ReturnedOwned;
- }
-
- bool isReturnedNotOwned() const {
- return getKind() == ReturnedNotOwned;
- }
-
- /// Create a state for an object whose lifetime is the responsibility of the
- /// current function, at least partially.
- ///
- /// Most commonly, this is an owned object with a retain count of +1.
- static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
- unsigned Count = 1) {
- return RefVal(Owned, o, Count, 0, t, IvarAccessHistory::None);
- }
-
- /// Create a state for an object whose lifetime is not the responsibility of
- /// the current function.
- ///
- /// Most commonly, this is an unowned object with a retain count of +0.
- static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
- unsigned Count = 0) {
- return RefVal(NotOwned, o, Count, 0, t, IvarAccessHistory::None);
- }
-
- RefVal operator-(size_t i) const {
- return RefVal(getKind(), getObjKind(), getCount() - i,
- getAutoreleaseCount(), getType(), getIvarAccessHistory());
- }
-
- RefVal operator+(size_t i) const {
- return RefVal(getKind(), getObjKind(), getCount() + i,
- getAutoreleaseCount(), getType(), getIvarAccessHistory());
- }
-
- RefVal operator^(Kind k) const {
- return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
- getType(), getIvarAccessHistory());
- }
-
- RefVal autorelease() const {
- return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
- getType(), getIvarAccessHistory());
- }
-
- RefVal withIvarAccess() const {
- assert(getIvarAccessHistory() == IvarAccessHistory::None);
- return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
- getType(), IvarAccessHistory::AccessedDirectly);
- }
-
- RefVal releaseViaIvar() const {
- assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly);
- return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
- getType(), IvarAccessHistory::ReleasedAfterDirectAccess);
- }
-
- // Comparison, profiling, and pretty-printing.
-
- bool hasSameState(const RefVal &X) const {
- return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt &&
- getIvarAccessHistory() == X.getIvarAccessHistory();
- }
-
- bool operator==(const RefVal& X) const {
- return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind();
- }
-
- void Profile(llvm::FoldingSetNodeID& ID) const {
- ID.Add(T);
- ID.AddInteger(RawKind);
- ID.AddInteger(Cnt);
- ID.AddInteger(ACnt);
- ID.AddInteger(RawObjectKind);
- ID.AddInteger(RawIvarAccessHistory);
- }
-
- void print(raw_ostream &Out) const;
-};
-
-void RefVal::print(raw_ostream &Out) const {
- if (!T.isNull())
- Out << "Tracked " << T.getAsString() << '/';
-
- switch (getKind()) {
- default: llvm_unreachable("Invalid RefVal kind");
- case Owned: {
- Out << "Owned";
- unsigned cnt = getCount();
- if (cnt) Out << " (+ " << cnt << ")";
- break;
- }
-
- case NotOwned: {
- Out << "NotOwned";
- unsigned cnt = getCount();
- if (cnt) Out << " (+ " << cnt << ")";
- break;
- }
-
- case ReturnedOwned: {
- Out << "ReturnedOwned";
- unsigned cnt = getCount();
- if (cnt) Out << " (+ " << cnt << ")";
- break;
- }
-
- case ReturnedNotOwned: {
- Out << "ReturnedNotOwned";
- unsigned cnt = getCount();
- if (cnt) Out << " (+ " << cnt << ")";
- break;
- }
-
- case Released:
- Out << "Released";
- break;
-
- case ErrorDeallocGC:
- Out << "-dealloc (GC)";
- break;
-
- case ErrorDeallocNotOwned:
- Out << "-dealloc (not-owned)";
- break;
-
- case ErrorLeak:
- Out << "Leaked";
- break;
-
- case ErrorLeakReturned:
- Out << "Leaked (Bad naming)";
- break;
-
- case ErrorGCLeakReturned:
- Out << "Leaked (GC-ed at return)";
- break;
-
- case ErrorUseAfterRelease:
- Out << "Use-After-Release [ERROR]";
- break;
-
- case ErrorReleaseNotOwned:
- Out << "Release of Not-Owned [ERROR]";
- break;
-
- case RefVal::ErrorOverAutorelease:
- Out << "Over-autoreleased";
- break;
-
- case RefVal::ErrorReturnedNotOwned:
- Out << "Non-owned object returned instead of owned";
- break;
- }
-
- switch (getIvarAccessHistory()) {
- case IvarAccessHistory::None:
- break;
- case IvarAccessHistory::AccessedDirectly:
- Out << " [direct ivar access]";
- break;
- case IvarAccessHistory::ReleasedAfterDirectAccess:
- Out << " [released after direct ivar access]";
- }
-
- if (ACnt) {
- Out << " [autorelease -" << ACnt << ']';
- }
-}
-} //end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// RefBindings - State used to track object reference counts.
-//===----------------------------------------------------------------------===//
-
-REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
-
-static inline const RefVal *getRefBinding(ProgramStateRef State,
- SymbolRef Sym) {
- return State->get<RefBindings>(Sym);
-}
-
-static inline ProgramStateRef setRefBinding(ProgramStateRef State,
- SymbolRef Sym, RefVal Val) {
- return State->set<RefBindings>(Sym, Val);
-}
-
-static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
- return State->remove<RefBindings>(Sym);
-}
-
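An illustrative one-liner (not from the patch) showing how these helpers and
RefVal's arithmetic operators were typically combined in the checker being removed
here:

  // Hypothetical usage: bump the retain count recorded for Sym by one.
  if (const RefVal *V = getRefBinding(State, Sym))
    State = setRefBinding(State, Sym, *V + 1);  // RefVal::operator+ raises Cnt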
-//===----------------------------------------------------------------------===//
-// Function/Method behavior summaries.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class RetainSummary {
- /// Args - a map of (index, ArgEffect) pairs, where index
- /// specifies the argument (starting from 0). This can be sparsely
- /// populated; arguments with no entry in Args use 'DefaultArgEffect'.
- ArgEffects Args;
-
- /// DefaultArgEffect - The default ArgEffect to apply to arguments that
- /// do not have an entry in Args.
- ArgEffect DefaultArgEffect;
-
- /// Receiver - If this summary applies to an Objective-C message expression,
- /// this is the effect applied to the state of the receiver.
- ArgEffect Receiver;
-
- /// Ret - The effect on the return value. Used to indicate if the
- /// function/method call returns a new tracked symbol.
- RetEffect Ret;
-
-public:
- RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
- ArgEffect ReceiverEff)
- : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R) {}
-
- /// getArg - Return the argument effect on the argument specified by
- /// idx (starting from 0).
- ArgEffect getArg(unsigned idx) const {
- if (const ArgEffect *AE = Args.lookup(idx))
- return *AE;
-
- return DefaultArgEffect;
- }
-
- void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
- Args = af.add(Args, idx, e);
- }
-
- /// setDefaultArgEffect - Set the default argument effect.
- void setDefaultArgEffect(ArgEffect E) {
- DefaultArgEffect = E;
- }
-
- /// getRetEffect - Returns the effect on the return value of the call.
- RetEffect getRetEffect() const { return Ret; }
-
- /// setRetEffect - Set the effect of the return value of the call.
- void setRetEffect(RetEffect E) { Ret = E; }
-
-
- /// Sets the effect on the receiver of the message.
- void setReceiverEffect(ArgEffect e) { Receiver = e; }
-
- /// getReceiverEffect - Returns the effect on the receiver of the call.
- /// This is only meaningful if the summary applies to an ObjCMessageExpr*.
- ArgEffect getReceiverEffect() const { return Receiver; }
-
- /// Test if two retain summaries are identical. Note that merely equivalent
- /// summaries are not necessarily identical (for example, if an explicit
- /// argument effect matches the default effect).
- bool operator==(const RetainSummary &Other) const {
- return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
- Receiver == Other.Receiver && Ret == Other.Ret;
- }
-
- /// Profile this summary for inclusion in a FoldingSet.
- void Profile(llvm::FoldingSetNodeID& ID) const {
- ID.Add(Args);
- ID.Add(DefaultArgEffect);
- ID.Add(Receiver);
- ID.Add(Ret);
- }
-
- /// A retain summary is simple if it has no ArgEffects other than the default.
- bool isSimple() const {
- return Args.isEmpty();
- }
-
-private:
- ArgEffects getArgEffects() const { return Args; }
- ArgEffect getDefaultArgEffect() const { return DefaultArgEffect; }
-
- friend class RetainSummaryManager;
- friend class RetainCountChecker;
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// Data structures for constructing summaries.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class ObjCSummaryKey {
- IdentifierInfo* II;
- Selector S;
-public:
- ObjCSummaryKey(IdentifierInfo* ii, Selector s)
- : II(ii), S(s) {}
-
- ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
- : II(d ? d->getIdentifier() : nullptr), S(s) {}
-
- ObjCSummaryKey(Selector s)
- : II(nullptr), S(s) {}
-
- IdentifierInfo *getIdentifier() const { return II; }
- Selector getSelector() const { return S; }
-};
-} // end anonymous namespace
-
-namespace llvm {
-template <> struct DenseMapInfo<ObjCSummaryKey> {
- static inline ObjCSummaryKey getEmptyKey() {
- return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
- DenseMapInfo<Selector>::getEmptyKey());
- }
-
- static inline ObjCSummaryKey getTombstoneKey() {
- return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
- DenseMapInfo<Selector>::getTombstoneKey());
- }
-
- static unsigned getHashValue(const ObjCSummaryKey &V) {
- typedef std::pair<IdentifierInfo*, Selector> PairTy;
- return DenseMapInfo<PairTy>::getHashValue(PairTy(V.getIdentifier(),
- V.getSelector()));
- }
-
- static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
- return LHS.getIdentifier() == RHS.getIdentifier() &&
- LHS.getSelector() == RHS.getSelector();
- }
-
-};
-} // end llvm namespace
-
-namespace {
-class ObjCSummaryCache {
- typedef llvm::DenseMap<ObjCSummaryKey, const RetainSummary *> MapTy;
- MapTy M;
-public:
- ObjCSummaryCache() {}
-
- const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
- // Do a lookup with the (D,S) pair. If we find a match return
- // the iterator.
- ObjCSummaryKey K(D, S);
- MapTy::iterator I = M.find(K);
-
- if (I != M.end())
- return I->second;
- if (!D)
- return nullptr;
-
- // Walk the super chain. If we find a hit with a parent, we'll end
- // up returning that summary. We actually allow that key (null,S), as
- // we cache summaries for the null ObjCInterfaceDecl* to allow us to
- // generate initial summaries without having to worry about NSObject
- // being declared.
- // FIXME: We may change this at some point.
- for (ObjCInterfaceDecl *C=D->getSuperClass() ;; C=C->getSuperClass()) {
- if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
- break;
-
- if (!C)
- return nullptr;
- }
-
- // Cache the summary with original key to make the next lookup faster
- // and return the iterator.
- const RetainSummary *Summ = I->second;
- M[K] = Summ;
- return Summ;
- }
-
- const RetainSummary *find(IdentifierInfo* II, Selector S) {
- // FIXME: Class method lookup. Right now we don't have a good way
- // of going between IdentifierInfo* and the class hierarchy.
- MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
-
- if (I == M.end())
- I = M.find(ObjCSummaryKey(S));
-
- return I == M.end() ? nullptr : I->second;
- }
-
- const RetainSummary *& operator[](ObjCSummaryKey K) {
- return M[K];
- }
-
- const RetainSummary *& operator[](Selector S) {
- return M[ ObjCSummaryKey(S) ];
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// Data structures for managing collections of summaries.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class RetainSummaryManager {
-
- //==-----------------------------------------------------------------==//
- // Typedefs.
- //==-----------------------------------------------------------------==//
-
- typedef llvm::DenseMap<const FunctionDecl*, const RetainSummary *>
- FuncSummariesTy;
-
- typedef ObjCSummaryCache ObjCMethodSummariesTy;
-
- typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
-
- //==-----------------------------------------------------------------==//
- // Data.
- //==-----------------------------------------------------------------==//
-
- /// Ctx - The ASTContext object for the analyzed ASTs.
- ASTContext &Ctx;
-
- /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
- const bool GCEnabled;
-
- /// Records whether or not the analyzed code runs in ARC mode.
- const bool ARCEnabled;
-
- /// FuncSummaries - A map from FunctionDecls to summaries.
- FuncSummariesTy FuncSummaries;
-
- /// ObjCClassMethodSummaries - A map from selectors (for instance methods)
- /// to summaries.
- ObjCMethodSummariesTy ObjCClassMethodSummaries;
-
- /// ObjCMethodSummaries - A map from selectors to summaries.
- ObjCMethodSummariesTy ObjCMethodSummaries;
-
- /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
- /// and all other data used by the checker.
- llvm::BumpPtrAllocator BPAlloc;
-
- /// AF - A factory for ArgEffects objects.
- ArgEffects::Factory AF;
-
- /// ScratchArgs - A holding buffer for construct ArgEffects.
- ArgEffects ScratchArgs;
-
- /// ObjCAllocRetE - Default return effect for methods returning Objective-C
- /// objects.
- RetEffect ObjCAllocRetE;
-
- /// ObjCInitRetE - Default return effect for init methods returning
- /// Objective-C objects.
- RetEffect ObjCInitRetE;
-
- /// SimpleSummaries - Used for uniquing summaries that don't have special
- /// effects.
- llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
-
- //==-----------------------------------------------------------------==//
- // Methods.
- //==-----------------------------------------------------------------==//
-
- /// getArgEffects - Returns a persistent ArgEffects object based on the
- /// data in ScratchArgs.
- ArgEffects getArgEffects();
-
- enum UnaryFuncKind { cfretain, cfrelease, cfautorelease, cfmakecollectable };
-
- const RetainSummary *getUnarySummary(const FunctionType* FT,
- UnaryFuncKind func);
-
- const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
- const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
- const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
-
- const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
-
- const RetainSummary *getPersistentSummary(RetEffect RetEff,
- ArgEffect ReceiverEff = DoNothing,
- ArgEffect DefaultEff = MayEscape) {
- RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
- return getPersistentSummary(Summ);
- }
-
- const RetainSummary *getDoNothingSummary() {
- return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- }
-
- const RetainSummary *getDefaultSummary() {
- return getPersistentSummary(RetEffect::MakeNoRet(),
- DoNothing, MayEscape);
- }
-
- const RetainSummary *getPersistentStopSummary() {
- return getPersistentSummary(RetEffect::MakeNoRet(),
- StopTracking, StopTracking);
- }
-
- void InitializeClassMethodSummaries();
- void InitializeMethodSummaries();
-private:
- void addNSObjectClsMethSummary(Selector S, const RetainSummary *Summ) {
- ObjCClassMethodSummaries[S] = Summ;
- }
-
- void addNSObjectMethSummary(Selector S, const RetainSummary *Summ) {
- ObjCMethodSummaries[S] = Summ;
- }
-
- void addClassMethSummary(const char* Cls, const char* name,
- const RetainSummary *Summ, bool isNullary = true) {
- IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
- Selector S = isNullary ? GetNullarySelector(name, Ctx)
- : GetUnarySelector(name, Ctx);
- ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
- }
-
- void addInstMethSummary(const char* Cls, const char* nullaryName,
- const RetainSummary *Summ) {
- IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
- Selector S = GetNullarySelector(nullaryName, Ctx);
- ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
- }
-
- template <typename... Keywords>
- void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy &Summaries,
- const RetainSummary *Summ, Keywords *... Kws) {
- Selector S = getKeywordSelector(Ctx, Kws...);
- Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
- }
-
- template <typename... Keywords>
- void addInstMethSummary(const char *Cls, const RetainSummary *Summ,
- Keywords *... Kws) {
- addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, Kws...);
- }
-
- template <typename... Keywords>
- void addClsMethSummary(const char *Cls, const RetainSummary *Summ,
- Keywords *... Kws) {
- addMethodSummary(&Ctx.Idents.get(Cls), ObjCClassMethodSummaries, Summ,
- Kws...);
- }
-
- template <typename... Keywords>
- void addClsMethSummary(IdentifierInfo *II, const RetainSummary *Summ,
- Keywords *... Kws) {
- addMethodSummary(II, ObjCClassMethodSummaries, Summ, Kws...);
- }
-
-public:
-
- RetainSummaryManager(ASTContext &ctx, bool gcenabled, bool usesARC)
- : Ctx(ctx),
- GCEnabled(gcenabled),
- ARCEnabled(usesARC),
- AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
- ObjCAllocRetE(gcenabled
- ? RetEffect::MakeGCNotOwned()
- : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
- : RetEffect::MakeOwned(RetEffect::ObjC))),
- ObjCInitRetE(gcenabled
- ? RetEffect::MakeGCNotOwned()
- : (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
- : RetEffect::MakeOwnedWhenTrackedReceiver())) {
- InitializeClassMethodSummaries();
- InitializeMethodSummaries();
- }
-
- const RetainSummary *getSummary(const CallEvent &Call,
- ProgramStateRef State = nullptr);
-
- const RetainSummary *getFunctionSummary(const FunctionDecl *FD);
-
- const RetainSummary *getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD,
- QualType RetTy,
- ObjCMethodSummariesTy &CachedSummaries);
-
- const RetainSummary *getInstanceMethodSummary(const ObjCMethodCall &M,
- ProgramStateRef State);
-
- const RetainSummary *getClassMethodSummary(const ObjCMethodCall &M) {
- assert(!M.isInstanceMessage());
- const ObjCInterfaceDecl *Class = M.getReceiverInterface();
-
- return getMethodSummary(M.getSelector(), Class, M.getDecl(),
- M.getResultType(), ObjCClassMethodSummaries);
- }
-
- /// getMethodSummary - This version of getMethodSummary is used to query
- /// the summary for the current method being analyzed.
- const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
- const ObjCInterfaceDecl *ID = MD->getClassInterface();
- Selector S = MD->getSelector();
- QualType ResultTy = MD->getReturnType();
-
- ObjCMethodSummariesTy *CachedSummaries;
- if (MD->isInstanceMethod())
- CachedSummaries = &ObjCMethodSummaries;
- else
- CachedSummaries = &ObjCClassMethodSummaries;
-
- return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
- }
-
- const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
- Selector S, QualType RetTy);
-
- /// Determine if there is a special return effect for this function or method.
- Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
- const Decl *D);
-
- void updateSummaryFromAnnotations(const RetainSummary *&Summ,
- const ObjCMethodDecl *MD);
-
- void updateSummaryFromAnnotations(const RetainSummary *&Summ,
- const FunctionDecl *FD);
-
- void updateSummaryForCall(const RetainSummary *&Summ,
- const CallEvent &Call);
-
- bool isGCEnabled() const { return GCEnabled; }
-
- bool isARCEnabled() const { return ARCEnabled; }
-
- bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
-
- RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
-
- friend class RetainSummaryTemplate;
-};
-
-// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
-// summaries. If a function or method looks like it has a default summary, but
-// it has annotations, the annotations are added to the stack-based template
-// and then copied into managed memory.
-class RetainSummaryTemplate {
- RetainSummaryManager &Manager;
- const RetainSummary *&RealSummary;
- RetainSummary ScratchSummary;
- bool Accessed;
-public:
- RetainSummaryTemplate(const RetainSummary *&real, RetainSummaryManager &mgr)
- : Manager(mgr), RealSummary(real), ScratchSummary(*real), Accessed(false) {}
-
- ~RetainSummaryTemplate() {
- if (Accessed)
- RealSummary = Manager.getPersistentSummary(ScratchSummary);
- }
-
- RetainSummary &operator*() {
- Accessed = true;
- return ScratchSummary;
- }
-
- RetainSummary *operator->() {
- Accessed = true;
- return &ScratchSummary;
- }
-};
-
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// Implementation of checker data structures.
-//===----------------------------------------------------------------------===//
-
-ArgEffects RetainSummaryManager::getArgEffects() {
- ArgEffects AE = ScratchArgs;
- ScratchArgs = AF.getEmptyMap();
- return AE;
-}
-
-const RetainSummary *
-RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
- // Unique "simple" summaries -- those without ArgEffects.
- if (OldSumm.isSimple()) {
- llvm::FoldingSetNodeID ID;
- OldSumm.Profile(ID);
-
- void *Pos;
- CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
-
- if (!N) {
- N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
- new (N) CachedSummaryNode(OldSumm);
- SimpleSummaries.InsertNode(N, Pos);
- }
-
- return &N->getValue();
- }
-
- RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
- new (Summ) RetainSummary(OldSumm);
- return Summ;
-}
-
-//===----------------------------------------------------------------------===//
-// Summary creation for functions (largely uses of Core Foundation).
-//===----------------------------------------------------------------------===//
-
-static bool isRetain(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("retain") || FName.endswith_lower("retain");
-}
-
-static bool isRelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("release") || FName.endswith_lower("release");
-}
-
-static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("autorelease") ||
- FName.endswith_lower("autorelease");
-}
-
-static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
- // FIXME: Remove FunctionDecl parameter.
- // FIXME: Is it really okay if MakeCollectable isn't a suffix?
- return FName.find_lower("MakeCollectable") != StringRef::npos;
-}
-
-static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
- switch (E) {
- case DoNothing:
- case Autorelease:
- case DecRefBridgedTransferred:
- case IncRef:
- case IncRefMsg:
- case MakeCollectable:
- case UnretainedOutParameter:
- case RetainedOutParameter:
- case MayEscape:
- case StopTracking:
- case StopTrackingHard:
- return StopTrackingHard;
- case DecRef:
- case DecRefAndStopTrackingHard:
- return DecRefAndStopTrackingHard;
- case DecRefMsg:
- case DecRefMsgAndStopTrackingHard:
- return DecRefMsgAndStopTrackingHard;
- case Dealloc:
- return Dealloc;
- }
-
- llvm_unreachable("Unknown ArgEffect kind");
-}
-
-void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
- const CallEvent &Call) {
- if (Call.hasNonZeroCallbackArg()) {
- ArgEffect RecEffect =
- getStopTrackingHardEquivalent(S->getReceiverEffect());
- ArgEffect DefEffect =
- getStopTrackingHardEquivalent(S->getDefaultArgEffect());
-
- ArgEffects CustomArgEffects = S->getArgEffects();
- for (ArgEffects::iterator I = CustomArgEffects.begin(),
- E = CustomArgEffects.end();
- I != E; ++I) {
- ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
- if (Translated != DefEffect)
- ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
- }
-
- RetEffect RE = RetEffect::MakeNoRetHard();
-
- // Special cases where the callback argument CANNOT free the return value.
- // This can generally only happen if we know that the callback will only be
- // called when the return value is already being deallocated.
- if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
- if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
- // When the CGBitmapContext is deallocated, the callback here will free
- // the associated data buffer.
- // The callback in dispatch_data_create frees the buffer, but not
- // the data object.
- if (Name->isStr("CGBitmapContextCreateWithData") ||
- Name->isStr("dispatch_data_create"))
- RE = S->getRetEffect();
- }
- }
-
- S = getPersistentSummary(RE, RecEffect, DefEffect);
- }
-
- // Special case '[super init];' and '[self init];'
- //
- // Even though calling '[super init]' without assigning the result to self
- // and checking if the parent returns 'nil' is a bad pattern, it is common.
- // Additionally, our Self Init checker already warns about it. To avoid
- // overwhelming the user with messages from both checkers, we model the case
- // of '[super init]' in cases when it is not consumed by another expression
- // as if the call preserves the value of 'self'; essentially, assuming it can
- // never fail and return 'nil'.
- // Note, we don't want to just stop tracking the value since we want the
- // RetainCount checker to report leaks and use-after-free if SelfInit checker
- // is turned off.
- if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
- if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
-
- // Check whether the message is consumed; if not, we know its result will
- // not be used in an assignment such as "self = [super init]".
- const Expr *ME = MC->getOriginExpr();
- const LocationContext *LCtx = MC->getLocationContext();
- ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
- if (!PM.isConsumedExpr(ME)) {
- RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
- ModifiableSummaryTemplate->setReceiverEffect(DoNothing);
- ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
- }
- }
- }
-}
-
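-/// Compute the retain-count summary for an arbitrary call: dispatch on the
-/// call kind, fetch (or build and cache) the corresponding summary, then
-/// apply the per-call-site adjustments in updateSummaryForCall().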
-const RetainSummary *
-RetainSummaryManager::getSummary(const CallEvent &Call,
- ProgramStateRef State) {
- const RetainSummary *Summ;
- switch (Call.getKind()) {
- case CE_Function:
- Summ = getFunctionSummary(cast<SimpleFunctionCall>(Call).getDecl());
- break;
- case CE_CXXMember:
- case CE_CXXMemberOperator:
- case CE_Block:
- case CE_CXXConstructor:
- case CE_CXXDestructor:
- case CE_CXXAllocator:
- // FIXME: These calls are currently unsupported.
- return getPersistentStopSummary();
- case CE_ObjCMessage: {
- const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
- if (Msg.isInstanceMessage())
- Summ = getInstanceMethodSummary(Msg, State);
- else
- Summ = getClassMethodSummary(Msg);
- break;
- }
- }
-
- updateSummaryForCall(Summ, Call);
-
- assert(Summ && "Unknown call type?");
- return Summ;
-}
-
-const RetainSummary *
-RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
- // If we don't know what function we're calling, use our default summary.
- if (!FD)
- return getDefaultSummary();
-
- // Look up a summary in our cache of FunctionDecls -> Summaries.
- FuncSummariesTy::iterator I = FuncSummaries.find(FD);
- if (I != FuncSummaries.end())
- return I->second;
-
- // No summary? Generate one.
- const RetainSummary *S = nullptr;
- bool AllowAnnotations = true;
-
- do {
- // We generate "stop" summaries for implicitly defined functions.
- if (FD->isImplicit()) {
- S = getPersistentStopSummary();
- break;
- }
-
- // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
- // function's type.
- const FunctionType* FT = FD->getType()->getAs<FunctionType>();
- const IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- break;
-
- StringRef FName = II->getName();
-
- // Strip away leading '_' characters. Doing this here will affect all the checks
- // down below.
- FName = FName.substr(FName.find_first_not_of('_'));
-
- // Inspect the result type.
- QualType RetTy = FT->getReturnType();
- std::string RetTyName = RetTy.getAsString();
-
- // FIXME: This should all be refactored into a chain of "summary lookup"
- // filters.
- assert(ScratchArgs.isEmpty());
-
- if (FName == "pthread_create" || FName == "pthread_setspecific") {
- // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
- // This will be addressed better with IPA.
- S = getPersistentStopSummary();
- } else if (FName == "NSMakeCollectable") {
- // Handle: id NSMakeCollectable(CFTypeRef)
- S = (RetTy->isObjCIdType())
- ? getUnarySummary(FT, cfmakecollectable)
- : getPersistentStopSummary();
- // The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
- // but we can fully model NSMakeCollectable ourselves.
- AllowAnnotations = false;
- } else if (FName == "CFPlugInInstanceCreate") {
- S = getPersistentSummary(RetEffect::MakeNoRet());
- } else if (FName == "IORegistryEntrySearchCFProperty"
- || (RetTyName == "CFMutableDictionaryRef" && (
- FName == "IOBSDNameMatching" ||
- FName == "IOServiceMatching" ||
- FName == "IOServiceNameMatching" ||
- FName == "IORegistryEntryIDMatching" ||
- FName == "IOOpenFirmwarePathMatching"
- ))) {
- // Part of <rdar://problem/6961230>. (IOKit)
- // This should be addressed using an API table.
- S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF),
- DoNothing, DoNothing);
- } else if (FName == "IOServiceGetMatchingService" ||
- FName == "IOServiceGetMatchingServices") {
- // FIXES: <rdar://problem/6326900>
- // This should be addressed using an API table. The string comparison is
- // also a little gross, but there is no need to super optimize here.
- ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "IOServiceAddNotification" ||
- FName == "IOServiceAddMatchingNotification") {
- // Part of <rdar://problem/6961230>. (IOKit)
- // This should be addressed using an API table.
- ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "CVPixelBufferCreateWithBytes") {
- // FIXES: <rdar://problem/7283567>
- // Eventually this can be improved by recognizing that the pixel
- // buffer passed to CVPixelBufferCreateWithBytes is released via
- // a callback and doing full IPA to make sure this is done correctly.
- // FIXME: This function has an out parameter that returns an
- // allocated object.
- ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "CGBitmapContextCreateWithData") {
- // FIXES: <rdar://problem/7358899>
- // Eventually this can be improved by recognizing that 'releaseInfo'
- // passed to CGBitmapContextCreateWithData is released via
- // a callback and doing full IPA to make sure this is done correctly.
- ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
- S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF),
- DoNothing, DoNothing);
- } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
- // FIXES: <rdar://problem/7283567>
- // Eventually this can be improved by recognizing that the pixel
- // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
- // via a callback and doing full IPA to make sure this is done
- // correctly.
- ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "VTCompressionSessionEncodeFrame") {
- // The context argument passed to VTCompressionSessionEncodeFrame()
- // is passed to the callback specified when creating the session
- // (e.g. with VTCompressionSessionCreate()) which can release it.
- // To account for this possibility, conservatively stop tracking
- // the context.
- ScratchArgs = AF.add(ScratchArgs, 5, StopTracking);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName == "dispatch_set_context" ||
- FName == "xpc_connection_set_context") {
- // <rdar://problem/11059275> - The analyzer currently doesn't have
- // a good way to reason about the finalizer function for libdispatch.
- // If we pass a context object that is memory managed, stop tracking it.
- // <rdar://problem/13783514> - Same problem, but for XPC.
- // FIXME: this hack should possibly go away once we can handle
- // libdispatch and XPC finalizers.
- ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- } else if (FName.startswith("NSLog")) {
- S = getDoNothingSummary();
- } else if (FName.startswith("NS") &&
- (FName.find("Insert") != StringRef::npos)) {
- // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
- // be deallocated by NSMapRemove. (radar://11152419)
- ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
- ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
- }
-
- // Did we get a summary?
- if (S)
- break;
-
- if (RetTy->isPointerType()) {
- // For CoreFoundation ('CF') types.
- if (cocoa::isRefType(RetTy, "CF", FName)) {
- if (isRetain(FD, FName)) {
- S = getUnarySummary(FT, cfretain);
- // CFRetain isn't supposed to be annotated. However, this may as well
- // be a user-made "safe" CFRetain function that is incorrectly
- // annotated as cf_returns_retained due to lack of better options.
- // We want to ignore such annotations.
- AllowAnnotations = false;
- } else if (isAutorelease(FD, FName)) {
- S = getUnarySummary(FT, cfautorelease);
- // The headers use cf_consumed, but we can fully model CFAutorelease
- // ourselves.
- AllowAnnotations = false;
- } else if (isMakeCollectable(FD, FName)) {
- S = getUnarySummary(FT, cfmakecollectable);
- AllowAnnotations = false;
- } else {
- S = getCFCreateGetRuleSummary(FD);
- }
-
- break;
- }
-
- // For CoreGraphics ('CG') and CoreVideo ('CV') types.
- if (cocoa::isRefType(RetTy, "CG", FName) ||
- cocoa::isRefType(RetTy, "CV", FName)) {
- if (isRetain(FD, FName))
- S = getUnarySummary(FT, cfretain);
- else
- S = getCFCreateGetRuleSummary(FD);
-
- break;
- }
-
- // For all other CF-style types, use the Create/Get
- // rule for summaries but don't support Retain functions
- // with framework-specific prefixes.
- if (coreFoundation::isCFObjectRef(RetTy)) {
- S = getCFCreateGetRuleSummary(FD);
- break;
- }
-
- if (FD->hasAttr<CFAuditedTransferAttr>()) {
- S = getCFCreateGetRuleSummary(FD);
- break;
- }
-
- break;
- }
-
- // Check for release functions, the only kind of functions that we care
- // about that don't return a pointer type.
- if (FName.size() >= 2 &&
- FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
- // Test for 'CGCF'.
- FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
-
- if (isRelease(FD, FName))
- S = getUnarySummary(FT, cfrelease);
- else {
- assert (ScratchArgs.isEmpty());
- // Remaining CoreFoundation and CoreGraphics functions.
- // We used to assume that they all strictly followed the ownership idiom
- // and that ownership could not be transferred. While this is technically
- // correct, many methods allow a tracked object to escape. For example:
- //
- // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
- // CFDictionaryAddValue(y, key, x);
- // CFRelease(x);
- // ... it is okay to use 'x' since 'y' has a reference to it
- //
- // We handle this and similar cases with the following heuristic. If the
- // function name contains "InsertValue", "SetValue", "AddValue",
- // "AppendValue", or "SetAttribute", then we assume that arguments may
- // "escape." This means that something else holds on to the object,
- // allowing it be used even after its local retain count drops to 0.
- ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
- StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
- StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
- StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
- StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
- ? MayEscape : DoNothing;
-
- S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
- }
- }
- }
- while (0);
-
- // If we got all the way here without any luck, use a default summary.
- if (!S)
- S = getDefaultSummary();
-
- // Annotations override defaults.
- if (AllowAnnotations)
- updateSummaryFromAnnotations(S, FD);
-
- FuncSummaries[FD] = S;
- return S;
-}
-
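-/// The Core Foundation ownership convention: functions that follow the
-/// "Create Rule" (typically those with "Create" or "Copy" in their name)
-/// return a +1 (owned) reference, while everything else follows the "Get
-/// Rule" and returns a +0 (unowned) reference.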
-const RetainSummary *
-RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
- if (coreFoundation::followsCreateRule(FD))
- return getCFSummaryCreateRule(FD);
-
- return getCFSummaryGetRule(FD);
-}
-
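-/// Build a summary for a one-argument CF-style function (e.g. CFRetain,
-/// CFRelease, CFAutorelease, CFMakeCollectable), attaching the corresponding
-/// effect to argument 0.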
-const RetainSummary *
-RetainSummaryManager::getUnarySummary(const FunctionType* FT,
- UnaryFuncKind func) {
-
- // Sanity check that this is *really* a unary function. A mismatch can
- // happen if people do weird things.
- const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
- if (!FTP || FTP->getNumParams() != 1)
- return getPersistentStopSummary();
-
- assert (ScratchArgs.isEmpty());
-
- ArgEffect Effect;
- switch (func) {
- case cfretain: Effect = IncRef; break;
- case cfrelease: Effect = DecRef; break;
- case cfautorelease: Effect = Autorelease; break;
- case cfmakecollectable: Effect = MakeCollectable; break;
- }
-
- ScratchArgs = AF.add(ScratchArgs, 0, Effect);
- return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
-}
-
-const RetainSummary *
-RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
- assert (ScratchArgs.isEmpty());
-
- return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF));
-}
-
-const RetainSummary *
-RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
- assert (ScratchArgs.isEmpty());
- return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
- DoNothing, DoNothing);
-}
-
-/// Returns true if the declaration 'D' is annotated with 'rcAnnotation'.
-static bool hasRCAnnotation(const Decl *D, StringRef rcAnnotation) {
- for (const auto *Ann : D->specific_attrs<AnnotateAttr>()) {
- if (Ann->getAnnotation() == rcAnnotation)
- return true;
- }
- return false;
-}
-
-/// Returns true if the function declaration 'FD' contains the
-/// 'rc_ownership_trusted_implementation' annotate attribute.
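-/// (i.e. the function was marked with
-/// __attribute__((annotate("rc_ownership_trusted_implementation")))).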
-static bool isTrustedReferenceCountImplementation(const FunctionDecl *FD) {
- return hasRCAnnotation(FD, "rc_ownership_trusted_implementation");
-}
-
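-/// Returns true if 'Ty' is treated as a generalized (non-CF/NS)
-/// reference-counted object type; currently this is recognized solely by the
-/// type name starting with "isl_".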
-static bool isGeneralizedObjectRef(QualType Ty) {
- return Ty.getAsString().substr(0, 4) == "isl_";
-}
-
-//===----------------------------------------------------------------------===//
-// Summary creation for Selectors.
-//===----------------------------------------------------------------------===//
-
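-/// Determine the return effect implied by the ns_/cf_returns_retained,
-/// ns_/cf_returns_not_retained, ns_returns_autoreleased, or
-/// rc_ownership_returns_retained annotations on 'D', if any.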
-Optional<RetEffect>
-RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
- const Decl *D) {
- if (cocoa::isCocoaObjectRef(RetTy)) {
- if (D->hasAttr<NSReturnsRetainedAttr>())
- return ObjCAllocRetE;
-
- if (D->hasAttr<NSReturnsNotRetainedAttr>() ||
- D->hasAttr<NSReturnsAutoreleasedAttr>())
- return RetEffect::MakeNotOwned(RetEffect::ObjC);
-
- } else if (!RetTy->isPointerType()) {
- return None;
- }
-
- if (D->hasAttr<CFReturnsRetainedAttr>())
- return RetEffect::MakeOwned(RetEffect::CF);
- else if (hasRCAnnotation(D, "rc_ownership_returns_retained"))
- return RetEffect::MakeOwned(RetEffect::Generalized);
-
- if (D->hasAttr<CFReturnsNotRetainedAttr>())
- return RetEffect::MakeNotOwned(RetEffect::CF);
-
- return None;
-}
-
-void
-RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
- const FunctionDecl *FD) {
- if (!FD)
- return;
-
- assert(Summ && "Must have a summary to add annotations to.");
- RetainSummaryTemplate Template(Summ, *this);
-
- // Effects on the parameters.
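- // Illustrative prototypes (assumed, not from the original source) for the
- // annotations handled below:
- //   void store(CFTypeRef value CF_CONSUMED); // argument: DecRef
- //   void copyOut(CFTypeRef *out CF_RETURNS_RETAINED); // RetainedOutParameter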
- unsigned parm_idx = 0;
- for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
- pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
- const ParmVarDecl *pd = *pi;
- if (pd->hasAttr<NSConsumedAttr>())
- Template->addArg(AF, parm_idx, DecRefMsg);
- else if (pd->hasAttr<CFConsumedAttr>() ||
- hasRCAnnotation(pd, "rc_ownership_consumed"))
- Template->addArg(AF, parm_idx, DecRef);
- else if (pd->hasAttr<CFReturnsRetainedAttr>() ||
- hasRCAnnotation(pd, "rc_ownership_returns_retained")) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, RetainedOutParameter);
- } else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, UnretainedOutParameter);
- }
- }
-
- QualType RetTy = FD->getReturnType();
- if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
- Template->setRetEffect(*RetE);
-}
-
-void
-RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
- const ObjCMethodDecl *MD) {
- if (!MD)
- return;
-
- assert(Summ && "Must have a valid summary to add annotations to");
- RetainSummaryTemplate Template(Summ, *this);
-
- // Effects on the receiver.
- if (MD->hasAttr<NSConsumesSelfAttr>())
- Template->setReceiverEffect(DecRefMsg);
-
- // Effects on the parameters.
- unsigned parm_idx = 0;
- for (ObjCMethodDecl::param_const_iterator
- pi=MD->param_begin(), pe=MD->param_end();
- pi != pe; ++pi, ++parm_idx) {
- const ParmVarDecl *pd = *pi;
- if (pd->hasAttr<NSConsumedAttr>())
- Template->addArg(AF, parm_idx, DecRefMsg);
- else if (pd->hasAttr<CFConsumedAttr>()) {
- Template->addArg(AF, parm_idx, DecRef);
- } else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, RetainedOutParameter);
- } else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
- QualType PointeeTy = pd->getType()->getPointeeType();
- if (!PointeeTy.isNull())
- if (coreFoundation::isCFObjectRef(PointeeTy))
- Template->addArg(AF, parm_idx, UnretainedOutParameter);
- }
- }
-
- QualType RetTy = MD->getReturnType();
- if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
- Template->setRetEffect(*RetE);
-}
-
-const RetainSummary *
-RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
- Selector S, QualType RetTy) {
- // Any special effects?
- ArgEffect ReceiverEff = DoNothing;
- RetEffect ResultEff = RetEffect::MakeNoRet();
-
- // Check the method family, and apply any default annotations.
- switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
- case OMF_None:
- case OMF_initialize:
- case OMF_performSelector:
- // Assume all Objective-C methods follow Cocoa Memory Management rules.
- // FIXME: Does the non-threaded performSelector family really belong here?
- // The selector could be, say, @selector(copy).
- if (cocoa::isCocoaObjectRef(RetTy))
- ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
- else if (coreFoundation::isCFObjectRef(RetTy)) {
- // ObjCMethodDecl currently doesn't consider CF objects as valid return
- // values for alloc, new, copy, or mutableCopy, so we have to
- // double-check with the selector. This is ugly, but there aren't that
- // many Objective-C methods that return CF objects, right?
- if (MD) {
- switch (S.getMethodFamily()) {
- case OMF_alloc:
- case OMF_new:
- case OMF_copy:
- case OMF_mutableCopy:
- ResultEff = RetEffect::MakeOwned(RetEffect::CF);
- break;
- default:
- ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
- break;
- }
- } else {
- ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
- }
- }
- break;
- case OMF_init:
- ResultEff = ObjCInitRetE;
- ReceiverEff = DecRefMsg;
- break;
- case OMF_alloc:
- case OMF_new:
- case OMF_copy:
- case OMF_mutableCopy:
- if (cocoa::isCocoaObjectRef(RetTy))
- ResultEff = ObjCAllocRetE;
- else if (coreFoundation::isCFObjectRef(RetTy))
- ResultEff = RetEffect::MakeOwned(RetEffect::CF);
- break;
- case OMF_autorelease:
- ReceiverEff = Autorelease;
- break;
- case OMF_retain:
- ReceiverEff = IncRefMsg;
- break;
- case OMF_release:
- ReceiverEff = DecRefMsg;
- break;
- case OMF_dealloc:
- ReceiverEff = Dealloc;
- break;
- case OMF_self:
- // -self is handled specially by the ExprEngine to propagate the receiver.
- break;
- case OMF_retainCount:
- case OMF_finalize:
- // These methods don't return objects.
- break;
- }
-
- // If one of the arguments in the selector has the keyword 'delegate', we
- // should stop tracking the reference count for the receiver. This is
- // because the reference count is quite possibly handled by a delegate
- // method.
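- // (e.g. selectors such as 'setDelegate:' or 'initWithTarget:delegate:')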
- if (S.isKeywordSelector()) {
- for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
- StringRef Slot = S.getNameForSlot(i);
- if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
- if (ResultEff == ObjCInitRetE)
- ResultEff = RetEffect::MakeNoRetHard();
- else
- ReceiverEff = StopTrackingHard;
- }
- }
- }
-
- if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
- ResultEff.getKind() == RetEffect::NoRet)
- return getDefaultSummary();
-
- return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
-}
-
-const RetainSummary *
-RetainSummaryManager::getInstanceMethodSummary(const ObjCMethodCall &Msg,
- ProgramStateRef State) {
- const ObjCInterfaceDecl *ReceiverClass = nullptr;
-
- // We do better tracking of the type of the object than the core ExprEngine.
- // See if we have its type in our private state.
- // FIXME: Eventually replace the use of state->get<RefBindings> with
- // a generic API for reasoning about the Objective-C types of symbolic
- // objects.
- SVal ReceiverV = Msg.getReceiverSVal();
- if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
- if (const RefVal *T = getRefBinding(State, Sym))
- if (const ObjCObjectPointerType *PT =
- T->getType()->getAs<ObjCObjectPointerType>())
- ReceiverClass = PT->getInterfaceDecl();
-
- // If we don't know what kind of object this is, fall back to its static type.
- if (!ReceiverClass)
- ReceiverClass = Msg.getReceiverInterface();
-
- // FIXME: The receiver could be a reference to a class, meaning that
- // we should use the class method.
- // id x = [NSObject class];
- // [x performSelector:... withObject:... afterDelay:...];
- Selector S = Msg.getSelector();
- const ObjCMethodDecl *Method = Msg.getDecl();
- if (!Method && ReceiverClass)
- Method = ReceiverClass->getInstanceMethod(S);
-
- return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
- ObjCMethodSummaries);
-}
-
-const RetainSummary *
-RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
- const ObjCMethodDecl *MD, QualType RetTy,
- ObjCMethodSummariesTy &CachedSummaries) {
-
- // Look up a summary in our summary cache.
- const RetainSummary *Summ = CachedSummaries.find(ID, S);
-
- if (!Summ) {
- Summ = getStandardMethodSummary(MD, S, RetTy);
-
- // Annotations override defaults.
- updateSummaryFromAnnotations(Summ, MD);
-
- // Memoize the summary.
- CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
- }
-
- return Summ;
-}
-
-void RetainSummaryManager::InitializeClassMethodSummaries() {
- assert(ScratchArgs.isEmpty());
- // Create the [NSAssertionHandler currentHandler] summary.
- addClassMethSummary("NSAssertionHandler", "currentHandler",
- getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
-
- // Create the [NSAutoreleasePool addObject:] summary.
- ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
- addClassMethSummary("NSAutoreleasePool", "addObject",
- getPersistentSummary(RetEffect::MakeNoRet(),
- DoNothing, Autorelease));
-}
-
-void RetainSummaryManager::InitializeMethodSummaries() {
-
- assert (ScratchArgs.isEmpty());
-
- // Create the "init" selector. It just acts as a pass-through for the
- // receiver.
- const RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
- addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
-
- // awakeAfterUsingCoder: behaves basically like an 'init' method. It
- // claims the receiver and returns a retained object.
- addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
- InitSumm);
-
- // The next methods are allocators.
- const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
- const RetainSummary *CFAllocSumm =
- getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF));
-
- // Create the "retain" selector.
- RetEffect NoRet = RetEffect::MakeNoRet();
- const RetainSummary *Summ = getPersistentSummary(NoRet, IncRefMsg);
- addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
-
- // Create the "release" selector.
- Summ = getPersistentSummary(NoRet, DecRefMsg);
- addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
-
- // Create the -dealloc summary.
- Summ = getPersistentSummary(NoRet, Dealloc);
- addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
-
- // Create the "autorelease" selector.
- Summ = getPersistentSummary(NoRet, Autorelease);
- addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
-
- // For NSWindow, allocated objects are (initially) self-owned.
- // FIXME: For now we opt for false negatives with NSWindow, as these objects
- // self-own themselves. However, they only do this once they are displayed.
- // Thus, we need to track an NSWindow's display status.
- // This is tracked in <rdar://problem/6062711>.
- // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
- const RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
- StopTracking,
- StopTracking);
-
- addClassMethSummary("NSWindow", "alloc", NoTrackYet);
-
- // For NSPanel (which subclasses NSWindow), allocated objects are not
- // self-owned.
- // FIXME: For now we don't track NSPanel objects for the same reason
- // as for NSWindow objects.
- addClassMethSummary("NSPanel", "alloc", NoTrackYet);
-
- // For NSNull, objects returned by +null are singletons that ignore
- // retain/release semantics. Just don't track them.
- // <rdar://problem/12858915>
- addClassMethSummary("NSNull", "null", NoTrackYet);
-
- // Don't track allocated autorelease pools, as it is okay to prematurely
- // exit a method.
- addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
- addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
- addClassMethSummary("NSAutoreleasePool", "new", NoTrackYet);
-
- // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:
- addInstMethSummary("QCRenderer", AllocSumm, "createSnapshotImageOfType");
- addInstMethSummary("QCView", AllocSumm, "createSnapshotImageOfType");
-
- // Create summaries for CIContext, 'createCGImage' and
- // 'createCGLayerWithSize'. These objects are CF objects, and are not
- // automatically garbage collected.
- addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect");
- addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect",
- "format", "colorSpace");
- addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info");
-}
-
-//===----------------------------------------------------------------------===//
-// Error reporting.
-//===----------------------------------------------------------------------===//
-namespace {
- typedef llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
- SummaryLogTy;
-
- //===-------------===//
- // Bug Descriptions. //
- //===-------------===//
-
- class CFRefBug : public BugType {
- protected:
- CFRefBug(const CheckerBase *checker, StringRef name)
- : BugType(checker, name, categories::MemoryCoreFoundationObjectiveC) {}
-
- public:
-
- // FIXME: Eventually remove.
- virtual const char *getDescription() const = 0;
-
- virtual bool isLeak() const { return false; }
- };
-
- class UseAfterRelease : public CFRefBug {
- public:
- UseAfterRelease(const CheckerBase *checker)
- : CFRefBug(checker, "Use-after-release") {}
-
- const char *getDescription() const override {
- return "Reference-counted object is used after it is released";
- }
- };
-
- class BadRelease : public CFRefBug {
- public:
- BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
-
- const char *getDescription() const override {
- return "Incorrect decrement of the reference count of an object that is "
- "not owned at this point by the caller";
- }
- };
-
- class DeallocGC : public CFRefBug {
- public:
- DeallocGC(const CheckerBase *checker)
- : CFRefBug(checker, "-dealloc called while using garbage collection") {}
-
- const char *getDescription() const override {
- return "-dealloc called while using garbage collection";
- }
- };
-
- class DeallocNotOwned : public CFRefBug {
- public:
- DeallocNotOwned(const CheckerBase *checker)
- : CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
-
- const char *getDescription() const override {
- return "-dealloc sent to object that may be referenced elsewhere";
- }
- };
-
- class OverAutorelease : public CFRefBug {
- public:
- OverAutorelease(const CheckerBase *checker)
- : CFRefBug(checker, "Object autoreleased too many times") {}
-
- const char *getDescription() const override {
- return "Object autoreleased too many times";
- }
- };
-
- class ReturnedNotOwnedForOwned : public CFRefBug {
- public:
- ReturnedNotOwnedForOwned(const CheckerBase *checker)
- : CFRefBug(checker, "Method should return an owned object") {}
-
- const char *getDescription() const override {
- return "Object with a +0 retain count returned to caller where a +1 "
- "(owning) retain count is expected";
- }
- };
-
- class Leak : public CFRefBug {
- public:
- Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
- // Leaks should not be reported if they are post-dominated by a sink.
- setSuppressOnSink(true);
- }
-
- const char *getDescription() const override { return ""; }
-
- bool isLeak() const override { return true; }
- };
-
- //===---------===//
- // Bug Reports. //
- //===---------===//
- class CFRefReportVisitor : public BugReporterVisitor {
- protected:
- SymbolRef Sym;
- const SummaryLogTy &SummaryLog;
- bool GCEnabled;
-
- public:
- CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
- : Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
-
- void Profile(llvm::FoldingSetNodeID &ID) const override {
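- // The address of a function-local static uniquely identifies this visitor
- // kind in the folding set; adding the tracked symbol keeps reports for
- // different symbols distinct.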
- static int x = 0;
- ID.AddPointer(&x);
- ID.AddPointer(Sym);
- }
-
- std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override;
-
- std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
- };
-
- class CFRefLeakReportVisitor : public CFRefReportVisitor {
- public:
- CFRefLeakReportVisitor(SymbolRef sym, bool GCEnabled,
- const SummaryLogTy &log)
- : CFRefReportVisitor(sym, GCEnabled, log) {}
-
- std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
- const ExplodedNode *N,
- BugReport &BR) override;
- };
-
- class CFRefReport : public BugReport {
- void addGCModeDescription(const LangOptions &LOpts, bool GCEnabled);
-
- public:
- CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
- const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- bool registerVisitor = true)
- : BugReport(D, D.getDescription(), n) {
- if (registerVisitor)
- addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
- addGCModeDescription(LOpts, GCEnabled);
- }
-
- CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
- const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- StringRef endText)
- : BugReport(D, D.getDescription(), endText, n) {
- addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
- addGCModeDescription(LOpts, GCEnabled);
- }
-
- llvm::iterator_range<ranges_iterator> getRanges() override {
- const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
- if (!BugTy.isLeak())
- return BugReport::getRanges();
- return llvm::make_range(ranges_iterator(), ranges_iterator());
- }
- };
-
- class CFRefLeakReport : public CFRefReport {
- const MemRegion* AllocBinding;
- const Stmt *AllocStmt;
-
- // Finds the function declaration where a leak warning for the parameter 'sym' should be raised.
- void deriveParamLocation(CheckerContext &Ctx, SymbolRef sym);
- // Finds the location where a leak warning for 'sym' should be raised.
- void deriveAllocLocation(CheckerContext &Ctx, SymbolRef sym);
- // Produces the description of a leak warning, which is printed on the console.
- void createDescription(CheckerContext &Ctx, bool GCEnabled, bool IncludeAllocationLine);
-
- public:
- CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
- const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx,
- bool IncludeAllocationLine);
-
- PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
- assert(Location.isValid());
- return Location;
- }
- };
-} // end anonymous namespace
-
-void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
- bool GCEnabled) {
- const char *GCModeDescription = nullptr;
-
- switch (LOpts.getGC()) {
- case LangOptions::GCOnly:
- assert(GCEnabled);
- GCModeDescription = "Code is compiled to only use garbage collection";
- break;
-
- case LangOptions::NonGC:
- assert(!GCEnabled);
- GCModeDescription = "Code is compiled to use reference counts";
- break;
-
- case LangOptions::HybridGC:
- if (GCEnabled) {
- GCModeDescription = "Code is compiled to use either garbage collection "
- "(GC) or reference counts (non-GC). The bug occurs "
- "with GC enabled";
- break;
- } else {
- GCModeDescription = "Code is compiled to use either garbage collection "
- "(GC) or reference counts (non-GC). The bug occurs "
- "in non-GC mode";
- break;
- }
- }
-
- assert(GCModeDescription && "invalid/unknown GC mode");
- addExtraText(GCModeDescription);
-}
-
-static bool isNumericLiteralExpression(const Expr *E) {
- // FIXME: This set of cases was copied from SemaExprObjC.
- return isa<IntegerLiteral>(E) ||
- isa<CharacterLiteral>(E) ||
- isa<FloatingLiteral>(E) ||
- isa<ObjCBoolLiteralExpr>(E) ||
- isa<CXXBoolLiteralExpr>(E);
-}
-
-static Optional<std::string> describeRegion(const MemRegion *MR) {
- if (const auto *VR = dyn_cast_or_null<VarRegion>(MR))
- return std::string(VR->getDecl()->getName());
- // Once we support more storage locations for bindings,
- // this would need to be improved.
- return None;
-}
-
-/// Returns true if this stack frame is for an Objective-C method that is a
-/// property getter or setter whose body has been synthesized by the analyzer.
-static bool isSynthesizedAccessor(const StackFrameContext *SFC) {
- auto Method = dyn_cast_or_null<ObjCMethodDecl>(SFC->getDecl());
- if (!Method || !Method->isPropertyAccessor())
- return false;
-
- return SFC->getAnalysisDeclContext()->isBodyAutosynthesized();
-}
-
-std::shared_ptr<PathDiagnosticPiece>
-CFRefReportVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN,
- BugReporterContext &BRC, BugReport &BR) {
- // FIXME: We will eventually need to handle non-statement-based events
- // (__attribute__((cleanup))).
- if (!N->getLocation().getAs<StmtPoint>())
- return nullptr;
-
- // Check if the type state has changed.
- ProgramStateRef PrevSt = PrevN->getState();
- ProgramStateRef CurrSt = N->getState();
- const LocationContext *LCtx = N->getLocationContext();
-
- const RefVal* CurrT = getRefBinding(CurrSt, Sym);
- if (!CurrT) return nullptr;
-
- const RefVal &CurrV = *CurrT;
- const RefVal *PrevT = getRefBinding(PrevSt, Sym);
-
- // Create a string buffer to contain all the useful things we want
- // to tell the user.
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- // This is the allocation site since the previous node had no bindings
- // for this symbol.
- if (!PrevT) {
- const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
-
- if (isa<ObjCIvarRefExpr>(S) &&
- isSynthesizedAccessor(LCtx->getStackFrame())) {
- S = LCtx->getStackFrame()->getCallSite();
- }
-
- if (isa<ObjCArrayLiteral>(S)) {
- os << "NSArray literal is an object with a +0 retain count";
- }
- else if (isa<ObjCDictionaryLiteral>(S)) {
- os << "NSDictionary literal is an object with a +0 retain count";
- }
- else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
- if (isNumericLiteralExpression(BL->getSubExpr()))
- os << "NSNumber literal is an object with a +0 retain count";
- else {
- const ObjCInterfaceDecl *BoxClass = nullptr;
- if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
- BoxClass = Method->getClassInterface();
-
- // We should always be able to find the boxing class interface,
- // but consider this future-proofing.
- if (BoxClass)
- os << *BoxClass << " b";
- else
- os << "B";
-
- os << "oxed expression produces an object with a +0 retain count";
- }
- }
- else if (isa<ObjCIvarRefExpr>(S)) {
- os << "Object loaded from instance variable";
- }
- else {
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- // Get the name of the callee (if it is available).
- SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
- if (const FunctionDecl *FD = X.getAsFunctionDecl())
- os << "Call to function '" << *FD << '\'';
- else
- os << "function call";
- }
- else {
- assert(isa<ObjCMessageExpr>(S));
- CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
- CallEventRef<ObjCMethodCall> Call
- = Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
-
- switch (Call->getMessageKind()) {
- case OCM_Message:
- os << "Method";
- break;
- case OCM_PropertyAccess:
- os << "Property";
- break;
- case OCM_Subscript:
- os << "Subscript";
- break;
- }
- }
-
- if (CurrV.getObjKind() == RetEffect::CF) {
- os << " returns a Core Foundation object of type "
- << Sym->getType().getAsString() << " with a ";
- } else if (CurrV.getObjKind() == RetEffect::Generalized) {
- os << " returns an object of type " << Sym->getType().getAsString()
- << " with a ";
- } else {
- assert (CurrV.getObjKind() == RetEffect::ObjC);
- QualType T = Sym->getType();
- if (!isa<ObjCObjectPointerType>(T)) {
- os << " returns an Objective-C object with a ";
- } else {
- const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
- os << " returns an instance of "
- << PT->getPointeeType().getAsString() << " with a ";
- }
- }
-
- if (CurrV.isOwned()) {
- os << "+1 retain count";
-
- if (GCEnabled) {
- assert(CurrV.getObjKind() == RetEffect::CF);
- os << ". "
- "Core Foundation objects are not automatically garbage collected.";
- }
- }
- else {
- assert (CurrV.isNotOwned());
- os << "+0 retain count";
- }
- }
-
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
- }
-
- // Gather up the effects that were performed on the object at this
- // program point.
- SmallVector<ArgEffect, 2> AEffects;
-
- const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
- if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
- // We only have summaries attached to nodes after evaluating CallExpr and
- // ObjCMessageExprs.
- const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
-
- if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
- // Iterate through the parameter expressions and see if the symbol
- // was ever passed as an argument.
- unsigned i = 0;
-
- for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
- AI!=AE; ++AI, ++i) {
-
- // Retrieve the value of the argument. Is it the symbol
- // we are interested in?
- if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
- continue;
-
- // We have an argument. Get the effect!
- AEffects.push_back(Summ->getArg(i));
- }
- }
- else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
- if (const Expr *receiver = ME->getInstanceReceiver())
- if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
- .getAsLocSymbol() == Sym) {
- // The symbol we are tracking is the receiver.
- AEffects.push_back(Summ->getReceiverEffect());
- }
- }
- }
-
- do {
- // Get the previous type state.
- RefVal PrevV = *PrevT;
-
- // Specially handle -dealloc.
- if (!GCEnabled && std::find(AEffects.begin(), AEffects.end(), Dealloc) !=
- AEffects.end()) {
- // Determine if the object's reference count was pushed to zero.
- assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
- // We may not have transitioned to 'release' if we hit an error.
- // This case is handled elsewhere.
- if (CurrV.getKind() == RefVal::Released) {
- assert(CurrV.getCombinedCounts() == 0);
- os << "Object released by directly sending the '-dealloc' message";
- break;
- }
- }
-
- // Specially handle CFMakeCollectable and friends.
- if (std::find(AEffects.begin(), AEffects.end(), MakeCollectable) !=
- AEffects.end()) {
- // Get the name of the function.
- const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
- SVal X =
- CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
- const FunctionDecl *FD = X.getAsFunctionDecl();
-
- if (GCEnabled) {
- // Determine if the object's reference count was pushed to zero.
- assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
-
- os << "In GC mode a call to '" << *FD
- << "' decrements an object's retain count and registers the "
- "object with the garbage collector. ";
-
- if (CurrV.getKind() == RefVal::Released) {
- assert(CurrV.getCount() == 0);
- os << "Since it now has a 0 retain count the object can be "
- "automatically collected by the garbage collector.";
- }
- else
- os << "An object must have a 0 retain count to be garbage collected. "
- "After this call its retain count is +" << CurrV.getCount()
- << '.';
- }
- else
- os << "When GC is not enabled a call to '" << *FD
- << "' has no effect on its argument.";
-
- // Nothing more to say.
- break;
- }
-
- // Determine if the typestate has changed.
- if (!PrevV.hasSameState(CurrV))
- switch (CurrV.getKind()) {
- case RefVal::Owned:
- case RefVal::NotOwned:
- if (PrevV.getCount() == CurrV.getCount()) {
- // Did an autorelease message get sent?
- if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
- return nullptr;
-
- assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
- os << "Object autoreleased";
- break;
- }
-
- if (PrevV.getCount() > CurrV.getCount())
- os << "Reference count decremented.";
- else
- os << "Reference count incremented.";
-
- if (unsigned Count = CurrV.getCount())
- os << " The object now has a +" << Count << " retain count.";
-
- if (PrevV.getKind() == RefVal::Released) {
- assert(GCEnabled && CurrV.getCount() > 0);
- os << " The object is not eligible for garbage collection until "
- "the retain count reaches 0 again.";
- }
-
- break;
-
- case RefVal::Released:
- if (CurrV.getIvarAccessHistory() ==
- RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
- CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
- os << "Strong instance variable relinquished. ";
- }
- os << "Object released.";
- break;
-
- case RefVal::ReturnedOwned:
- // Autoreleases can be applied after marking a node ReturnedOwned.
- if (CurrV.getAutoreleaseCount())
- return nullptr;
-
- os << "Object returned to caller as an owning reference (single "
- "retain count transferred to caller)";
- break;
-
- case RefVal::ReturnedNotOwned:
- os << "Object returned to caller with a +0 retain count";
- break;
-
- default:
- return nullptr;
- }
-
- // Emit any remaining diagnostics for the argument effects (if any).
- for (SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
- E=AEffects.end(); I != E; ++I) {
-
- // A bunch of things have alternate behavior under GC.
- if (GCEnabled)
- switch (*I) {
- default: break;
- case Autorelease:
- os << "In GC mode an 'autorelease' has no effect.";
- continue;
- case IncRefMsg:
- os << "In GC mode the 'retain' message has no effect.";
- continue;
- case DecRefMsg:
- os << "In GC mode the 'release' message has no effect.";
- continue;
- }
- }
- } while (0);
-
- if (os.str().empty())
- return nullptr; // We have nothing to say!
-
- const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
- auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
-
- // Add the range by scanning the children of the statement for any bindings
- // to Sym.
- for (const Stmt *Child : S->children())
- if (const Expr *Exp = dyn_cast_or_null<Expr>(Child))
- if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
- P->addRange(Exp->getSourceRange());
- break;
- }
-
- return std::move(P);
-}
-
-namespace {
-// Find the first node in the current function context that referred to the
- // tracked symbol and the memory location that the value was stored to. Note, the
-// value is only reported if the allocation occurred in the same function as
-// the leak. The function can also return a location context, which should be
-// treated as interesting.
-struct AllocationInfo {
- const ExplodedNode* N;
- const MemRegion *R;
- const LocationContext *InterestingMethodContext;
- AllocationInfo(const ExplodedNode *InN,
- const MemRegion *InR,
- const LocationContext *InInterestingMethodContext) :
- N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
-};
-} // end anonymous namespace
-
-static AllocationInfo
-GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
- SymbolRef Sym) {
- const ExplodedNode *AllocationNode = N;
- const ExplodedNode *AllocationNodeInCurrentOrParentContext = N;
- const MemRegion *FirstBinding = nullptr;
- const LocationContext *LeakContext = N->getLocationContext();
-
- // The location context of the init method called on the leaked object, if
- // available.
- const LocationContext *InitMethodContext = nullptr;
-
- while (N) {
- ProgramStateRef St = N->getState();
- const LocationContext *NContext = N->getLocationContext();
-
- if (!getRefBinding(St, Sym))
- break;
-
- StoreManager::FindUniqueBinding FB(Sym);
- StateMgr.iterBindings(St, FB);
-
- if (FB) {
- const MemRegion *R = FB.getRegion();
- const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
- // Do not show local variables belonging to a function other than
- // where the error is reported.
- if (!VR || VR->getStackFrame() == LeakContext->getStackFrame())
- FirstBinding = R;
- }
-
- // AllocationNode is the last node in which the symbol was tracked.
- AllocationNode = N;
-
- // AllocationNodeInCurrentOrParentContext is the last node in the current or
- // parent context in which the symbol was tracked.
- //
- // Note that the allocation site might be in the parent context. For example,
- // the case where an allocation happens in a block that captures a reference
- // to it and that reference is overwritten/dropped by another call to
- // the block.
- if (NContext == LeakContext || NContext->isParentOf(LeakContext))
- AllocationNodeInCurrentOrParentContext = N;
-
- // Find the last init that was called on the given symbol and store the
- // init method's location context.
- if (!InitMethodContext)
- if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
- const Stmt *CE = CEP->getCallExpr();
- if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
- const Stmt *RecExpr = ME->getInstanceReceiver();
- if (RecExpr) {
- SVal RecV = St->getSVal(RecExpr, NContext);
- if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
- InitMethodContext = CEP->getCalleeContext();
- }
- }
- }
-
- N = N->pred_empty() ? nullptr : *(N->pred_begin());
- }
-
- // If we are reporting a leak of the object that was allocated with alloc,
- // mark its init method as interesting.
- const LocationContext *InterestingMethodContext = nullptr;
- if (InitMethodContext) {
- const ProgramPoint AllocPP = AllocationNode->getLocation();
- if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
- if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
- if (ME->getMethodFamily() == OMF_alloc)
- InterestingMethodContext = InitMethodContext;
- }
-
- // If allocation happened in a function different from the leak node context,
- // do not report the binding.
- assert(N && "Could not find allocation node");
- if (N->getLocationContext() != LeakContext) {
- FirstBinding = nullptr;
- }
-
- return AllocationInfo(AllocationNodeInCurrentOrParentContext,
- FirstBinding,
- InterestingMethodContext);
-}
-
-std::shared_ptr<PathDiagnosticPiece>
-CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndN, BugReport &BR) {
- BR.markInteresting(Sym);
- return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
-}
-
-std::shared_ptr<PathDiagnosticPiece>
-CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndN, BugReport &BR) {
-
- // Tell the BugReporterContext to report cases when the tracked symbol is
- // assigned to different variables, etc.
- BR.markInteresting(Sym);
-
- // We are reporting a leak. Walk up the graph to get to the first node where
- // the symbol appeared, and also get the first VarDecl that the tracked object
- // is stored to.
- AllocationInfo AllocI =
- GetAllocationSite(BRC.getStateManager(), EndN, Sym);
-
- const MemRegion* FirstBinding = AllocI.R;
- BR.markInteresting(AllocI.InterestingMethodContext);
-
- SourceManager& SM = BRC.getSourceManager();
-
- // Compute an actual location for the leak. Sometimes a leak doesn't
- // occur at an actual statement (e.g., transition between blocks; end
- // of function) so we need to walk the graph and compute a real location.
- const ExplodedNode *LeakN = EndN;
- PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
-
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
-
- os << "Object leaked: ";
-
- Optional<std::string> RegionDescription = describeRegion(FirstBinding);
- if (RegionDescription) {
- os << "object allocated and stored into '" << *RegionDescription << '\'';
- }
- else
- os << "allocated object";
-
- // Get the retain count.
- const RefVal* RV = getRefBinding(EndN->getState(), Sym);
- assert(RV);
-
- if (RV->getKind() == RefVal::ErrorLeakReturned) {
- // FIXME: Per comments in rdar://6320065, "create" only applies to CF
- // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
- // to the caller for NS objects.
- const Decl *D = &EndN->getCodeDecl();
-
- os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
- : " is returned from a function ");
-
- if (D->hasAttr<CFReturnsNotRetainedAttr>())
- os << "that is annotated as CF_RETURNS_NOT_RETAINED";
- else if (D->hasAttr<NSReturnsNotRetainedAttr>())
- os << "that is annotated as NS_RETURNS_NOT_RETAINED";
- else {
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (BRC.getASTContext().getLangOpts().ObjCAutoRefCount) {
- os << "managed by Automatic Reference Counting";
- } else {
- os << "whose name ('" << MD->getSelector().getAsString()
- << "') does not start with "
- "'copy', 'mutableCopy', 'alloc' or 'new'."
- " This violates the naming convention rules"
- " given in the Memory Management Guide for Cocoa";
- }
- }
- else {
- const FunctionDecl *FD = cast<FunctionDecl>(D);
- os << "whose name ('" << *FD
- << "') does not contain 'Copy' or 'Create'. This violates the naming"
- " convention rules given in the Memory Management Guide for Core"
- " Foundation";
- }
- }
- }
- else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
- const ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
- os << " and returned from method '" << MD.getSelector().getAsString()
- << "' is potentially leaked when using garbage collection. Callers "
- "of this method do not expect a returned object with a +1 retain "
- "count since they expect the object to be managed by the garbage "
- "collector";
- }
- else
- os << " is not referenced later in this execution path and has a retain "
- "count of +" << RV->getCount();
-
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
-}
-
-void CFRefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
- const SourceManager& SMgr = Ctx.getSourceManager();
-
- if (!sym->getOriginRegion())
- return;
-
- auto *Region = dyn_cast<DeclRegion>(sym->getOriginRegion());
- if (Region) {
- const Decl *PDecl = Region->getDecl();
- if (PDecl && isa<ParmVarDecl>(PDecl)) {
- PathDiagnosticLocation ParamLocation = PathDiagnosticLocation::create(PDecl, SMgr);
- Location = ParamLocation;
- UniqueingLocation = ParamLocation;
- UniqueingDecl = Ctx.getLocationContext()->getDecl();
- }
- }
-}
-
-void CFRefLeakReport::deriveAllocLocation(CheckerContext &Ctx,SymbolRef sym) {
- // Most bug reports are cached at the location where they occurred.
- // With leaks, we want to unique them by the location where they were
- // allocated, and only report a single path. To do this, we need to find
- // the allocation site of a piece of tracked memory, which we do via a
- // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
- // Note that this is *not* the trimmed graph; we are guaranteed, however,
- // that all ancestor nodes that represent the allocation site have the
- // same SourceLocation.
- const ExplodedNode *AllocNode = nullptr;
-
- const SourceManager& SMgr = Ctx.getSourceManager();
-
- AllocationInfo AllocI =
- GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
-
- AllocNode = AllocI.N;
- AllocBinding = AllocI.R;
- markInteresting(AllocI.InterestingMethodContext);
-
- // Get the SourceLocation for the allocation site.
- // FIXME: This will crash the analyzer if an allocation comes from an
- // implicit call (ex: a destructor call).
- // (Currently there are no such allocations in Cocoa, though.)
- AllocStmt = PathDiagnosticLocation::getStmt(AllocNode);
-
- if (!AllocStmt) {
- AllocBinding = nullptr;
- return;
- }
-
- PathDiagnosticLocation AllocLocation =
- PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
- AllocNode->getLocationContext());
- Location = AllocLocation;
-
- // Set uniqueing info, which will be used to unique the bug reports. The
- // leaks should be uniqued on the allocation site.
- UniqueingLocation = AllocLocation;
- UniqueingDecl = AllocNode->getLocationContext()->getDecl();
-}
-
-void CFRefLeakReport::createDescription(CheckerContext &Ctx, bool GCEnabled,
- bool IncludeAllocationLine) {
- assert(Location.isValid() && UniqueingDecl && UniqueingLocation.isValid());
- Description.clear();
- llvm::raw_string_ostream os(Description);
- os << "Potential leak ";
- if (GCEnabled)
- os << "(when using garbage collection) ";
- os << "of an object";
-
- Optional<std::string> RegionDescription = describeRegion(AllocBinding);
- if (RegionDescription) {
- os << " stored into '" << *RegionDescription << '\'';
- if (IncludeAllocationLine) {
- FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
- os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
- }
- }
-}
-
-CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
- bool GCEnabled, const SummaryLogTy &Log,
- ExplodedNode *n, SymbolRef sym,
- CheckerContext &Ctx,
- bool IncludeAllocationLine)
- : CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
-
- deriveAllocLocation(Ctx, sym);
- if (!AllocBinding)
- deriveParamLocation(Ctx, sym);
-
- createDescription(Ctx, GCEnabled, IncludeAllocationLine);
-
- addVisitor(llvm::make_unique<CFRefLeakReportVisitor>(sym, GCEnabled, Log));
-}
-
-//===----------------------------------------------------------------------===//
-// Main checker logic.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class RetainCountChecker
- : public Checker< check::Bind,
- check::DeadSymbols,
- check::EndAnalysis,
- check::BeginFunction,
- check::EndFunction,
- check::PostStmt<BlockExpr>,
- check::PostStmt<CastExpr>,
- check::PostStmt<ObjCArrayLiteral>,
- check::PostStmt<ObjCDictionaryLiteral>,
- check::PostStmt<ObjCBoxedExpr>,
- check::PostStmt<ObjCIvarRefExpr>,
- check::PostCall,
- check::PreStmt<ReturnStmt>,
- check::RegionChanges,
- eval::Assume,
- eval::Call > {
- mutable std::unique_ptr<CFRefBug> useAfterRelease, releaseNotOwned;
- mutable std::unique_ptr<CFRefBug> deallocGC, deallocNotOwned;
- mutable std::unique_ptr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
- mutable std::unique_ptr<CFRefBug> leakWithinFunction, leakAtReturn;
- mutable std::unique_ptr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
-
- typedef llvm::DenseMap<SymbolRef, const CheckerProgramPointTag *> SymbolTagMap;
-
- // This map is only used to ensure proper deletion of any allocated tags.
- mutable SymbolTagMap DeadSymbolTags;
-
- mutable std::unique_ptr<RetainSummaryManager> Summaries;
- mutable std::unique_ptr<RetainSummaryManager> SummariesGC;
- mutable SummaryLogTy SummaryLog;
- mutable bool ShouldResetSummaryLog;
-
- /// Optional setting to indicate if leak reports should include
- /// the allocation line.
- mutable bool IncludeAllocationLine;
-
-public:
- RetainCountChecker(AnalyzerOptions &AO)
- : ShouldResetSummaryLog(false),
- IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
-
- ~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); }
-
- void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
- ExprEngine &Eng) const {
- // FIXME: This is a hack to make sure the summary log gets cleared between
- // analyses of different code bodies.
- //
- // Why is this necessary? Because a checker's lifetime is tied to a
- // translation unit, but an ExplodedGraph's lifetime is just a code body.
- // Once in a blue moon, a new ExplodedNode will have the same address as an
- // old one with an associated summary, and the bug report visitor gets very
- // confused. (To make things worse, the summary lifetime is currently also
- // tied to a code body, so we get a crash instead of incorrect results.)
- //
- // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
- // changes, things will start going wrong again. Really the lifetime of this
- // log needs to be tied to either the specific nodes in it or the entire
- // ExplodedGraph, not to a specific part of the code being analyzed.
- //
- // (Also, having stateful local data means that the same checker can't be
- // used from multiple threads, but a lot of checkers have incorrect
- // assumptions about that anyway. So that wasn't a priority at the time of
- // this fix.)
- //
- // This happens at the end of analysis, but bug reports are emitted /after/
- // this point. So we can't just clear the summary log now. Instead, we mark
- // that the next time we access the summary log, it should be cleared.
-
- // If we never reset the summary log during /this/ code body analysis,
- // there were no new summaries. There might still have been summaries from
- // the /last/ analysis, so clear them out to make sure the bug report
- // visitors don't get confused.
- if (ShouldResetSummaryLog)
- SummaryLog.clear();
-
- ShouldResetSummaryLog = !SummaryLog.empty();
- }
-
- CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts,
- bool GCEnabled) const {
- if (GCEnabled) {
- if (!leakWithinFunctionGC)
- leakWithinFunctionGC.reset(new Leak(this, "Leak of object when using "
- "garbage collection"));
- return leakWithinFunctionGC.get();
- } else {
- if (!leakWithinFunction) {
- if (LOpts.getGC() == LangOptions::HybridGC) {
- leakWithinFunction.reset(new Leak(this,
- "Leak of object when not using "
- "garbage collection (GC) in "
- "dual GC/non-GC code"));
- } else {
- leakWithinFunction.reset(new Leak(this, "Leak"));
- }
- }
- return leakWithinFunction.get();
- }
- }
-
- CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
- if (GCEnabled) {
- if (!leakAtReturnGC)
- leakAtReturnGC.reset(new Leak(this,
- "Leak of returned object when using "
- "garbage collection"));
- return leakAtReturnGC.get();
- } else {
- if (!leakAtReturn) {
- if (LOpts.getGC() == LangOptions::HybridGC) {
- leakAtReturn.reset(new Leak(this,
- "Leak of returned object when not using "
- "garbage collection (GC) in dual "
- "GC/non-GC code"));
- } else {
- leakAtReturn.reset(new Leak(this, "Leak of returned object"));
- }
- }
- return leakAtReturn.get();
- }
- }
-
- RetainSummaryManager &getSummaryManager(ASTContext &Ctx,
- bool GCEnabled) const {
- // FIXME: We don't support ARC being turned on and off during one analysis.
- // (nor, for that matter, do we support changing ASTContexts)
- bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
- if (GCEnabled) {
- if (!SummariesGC)
- SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
- else
- assert(SummariesGC->isARCEnabled() == ARCEnabled);
- return *SummariesGC;
- } else {
- if (!Summaries)
- Summaries.reset(new RetainSummaryManager(Ctx, false, ARCEnabled));
- else
- assert(Summaries->isARCEnabled() == ARCEnabled);
- return *Summaries;
- }
- }
-
- RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
- return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
- }
-
- void printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const override;
-
- void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
- void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
- void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
-
- void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
- void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
- void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
-
- void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
-
- void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
-
- void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
- CheckerContext &C) const;
-
- void processSummaryOfInlined(const RetainSummary &Summ,
- const CallEvent &Call,
- CheckerContext &C) const;
-
- bool evalCall(const CallExpr *CE, CheckerContext &C) const;
-
- ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
- bool Assumption) const;
-
- ProgramStateRef
- checkRegionChanges(ProgramStateRef state,
- const InvalidatedSymbols *invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext* LCtx,
- const CallEvent *Call) const;
-
- void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
- void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
- ExplodedNode *Pred, RetEffect RE, RefVal X,
- SymbolRef Sym, ProgramStateRef state) const;
-
- void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
- void checkBeginFunction(CheckerContext &C) const;
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
-
- ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
- RefVal V, ArgEffect E, RefVal::Kind &hasErr,
- CheckerContext &C) const;
-
- void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
- RefVal::Kind ErrorKind, SymbolRef Sym,
- CheckerContext &C) const;
-
- void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
-
- const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
-
- ProgramStateRef handleSymbolDeath(ProgramStateRef state,
- SymbolRef sid, RefVal V,
- SmallVectorImpl<SymbolRef> &Leaked) const;
-
- ProgramStateRef
- handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
- const ProgramPointTag *Tag, CheckerContext &Ctx,
- SymbolRef Sym, RefVal V) const;
-
- ExplodedNode *processLeaks(ProgramStateRef state,
- SmallVectorImpl<SymbolRef> &Leaked,
- CheckerContext &Ctx,
- ExplodedNode *Pred = nullptr) const;
-};
-} // end anonymous namespace
-
-namespace {
-class StopTrackingCallback final : public SymbolVisitor {
- ProgramStateRef state;
-public:
- StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
- ProgramStateRef getState() const { return state; }
-
- bool VisitSymbol(SymbolRef sym) override {
- state = state->remove<RefBindings>(sym);
- return true;
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// Handle statements that may have an effect on refcounts.
-//===----------------------------------------------------------------------===//
-
-void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
- CheckerContext &C) const {
-
- // Scan the BlockDecRefExprs for any object the retain count checker
- // may be tracking.
- if (!BE->getBlockDecl()->hasCaptures())
- return;
-
- ProgramStateRef state = C.getState();
- auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
-
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- if (I == E)
- return;
-
- // FIXME: For now we invalidate the tracking of all symbols passed to blocks
- // via captured variables, even though captured variables result in a copy
- // and in implicit increment/decrement of a retain count.
- SmallVector<const MemRegion*, 10> Regions;
- const LocationContext *LC = C.getLocationContext();
- MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
-
- for ( ; I != E; ++I) {
- const VarRegion *VR = I.getCapturedRegion();
- if (VR->getSuperRegion() == R) {
- VR = MemMgr.getVarRegion(VR->getDecl(), LC);
- }
- Regions.push_back(VR);
- }
-
- state =
- state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
- Regions.data() + Regions.size()).getState();
- C.addTransition(state);
-}
-
-void RetainCountChecker::checkPostStmt(const CastExpr *CE,
- CheckerContext &C) const {
- const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
- if (!BE)
- return;
-
- ArgEffect AE = IncRef;
-
- switch (BE->getBridgeKind()) {
- case clang::OBC_Bridge:
- // Do nothing.
- return;
- case clang::OBC_BridgeRetained:
- AE = IncRef;
- break;
- case clang::OBC_BridgeTransfer:
- AE = DecRefBridgedTransferred;
- break;
- }
-
- ProgramStateRef state = C.getState();
- SymbolRef Sym = C.getSVal(CE).getAsLocSymbol();
- if (!Sym)
- return;
- const RefVal* T = getRefBinding(state, Sym);
- if (!T)
- return;
-
- RefVal::Kind hasErr = (RefVal::Kind) 0;
- state = updateSymbol(state, Sym, *T, AE, hasErr, C);
-
- if (hasErr) {
- // FIXME: If we get an error during a bridge cast, should we report it?
- return;
- }
-
- C.addTransition(state);
-}
-
-void RetainCountChecker::processObjCLiterals(CheckerContext &C,
- const Expr *Ex) const {
- ProgramStateRef state = C.getState();
- const ExplodedNode *pred = C.getPredecessor();
- for (const Stmt *Child : Ex->children()) {
- SVal V = pred->getSVal(Child);
- if (SymbolRef sym = V.getAsSymbol())
- if (const RefVal* T = getRefBinding(state, sym)) {
- RefVal::Kind hasErr = (RefVal::Kind) 0;
- state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
- if (hasErr) {
- processNonLeakError(state, Child->getSourceRange(), hasErr, sym, C);
- return;
- }
- }
- }
-
- // Return the object as autoreleased.
- // RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
- if (SymbolRef sym =
- state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
- QualType ResultTy = Ex->getType();
- state = setRefBinding(state, sym,
- RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
- }
-
- C.addTransition(state);
-}
-
-void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
- CheckerContext &C) const {
- // Apply the 'MayEscape' to all values.
- processObjCLiterals(C, AL);
-}
-
-void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
- CheckerContext &C) const {
- // Apply the 'MayEscape' to all keys and values.
- processObjCLiterals(C, DL);
-}
-
-void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
- CheckerContext &C) const {
- const ExplodedNode *Pred = C.getPredecessor();
- ProgramStateRef State = Pred->getState();
-
- if (SymbolRef Sym = Pred->getSVal(Ex).getAsSymbol()) {
- QualType ResultTy = Ex->getType();
- State = setRefBinding(State, Sym,
- RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
- }
-
- C.addTransition(State);
-}
-
-void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
- CheckerContext &C) const {
- Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
- if (!IVarLoc)
- return;
-
- ProgramStateRef State = C.getState();
- SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol();
- if (!Sym || !dyn_cast_or_null<ObjCIvarRegion>(Sym->getOriginRegion()))
- return;
-
- // Accessing an ivar directly is unusual. If we've done that, be more
- // forgiving about what the surrounding code is allowed to do.
-
- QualType Ty = Sym->getType();
- RetEffect::ObjKind Kind;
- if (Ty->isObjCRetainableType())
- Kind = RetEffect::ObjC;
- else if (coreFoundation::isCFObjectRef(Ty))
- Kind = RetEffect::CF;
- else
- return;
-
- // If the value is already known to be nil, don't bother tracking it.
- ConstraintManager &CMgr = State->getConstraintManager();
- if (CMgr.isNull(State, Sym).isConstrainedTrue())
- return;
-
- if (const RefVal *RV = getRefBinding(State, Sym)) {
- // If we've seen this symbol before, or we're only seeing it now because
- // of something the analyzer has synthesized, don't do anything.
- if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None ||
- isSynthesizedAccessor(C.getStackFrame())) {
- return;
- }
-
- // Note that this value has been loaded from an ivar.
- C.addTransition(setRefBinding(State, Sym, RV->withIvarAccess()));
- return;
- }
-
- RefVal PlusZero = RefVal::makeNotOwned(Kind, Ty);
-
- // In a synthesized accessor, the effective retain count is +0.
- if (isSynthesizedAccessor(C.getStackFrame())) {
- C.addTransition(setRefBinding(State, Sym, PlusZero));
- return;
- }
-
- State = setRefBinding(State, Sym, PlusZero.withIvarAccess());
- C.addTransition(State);
-}
-
-void RetainCountChecker::checkPostCall(const CallEvent &Call,
- CheckerContext &C) const {
- RetainSummaryManager &Summaries = getSummaryManager(C);
- const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
-
- if (C.wasInlined) {
- processSummaryOfInlined(*Summ, Call, C);
- return;
- }
- checkSummary(*Summ, Call, C);
-}
-
-/// GetReturnType - Used to get the return type of a message expression or
-/// function call with the intention of affixing that type to a tracked symbol.
-/// While the return type can be queried directly from RetEx, when
-/// invoking class methods we augment the return type to be that of
-/// a pointer to the class (as opposed to it just being id).
-// FIXME: We may be able to do this with related result types instead.
-// This function is probably overestimating.
-static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
- QualType RetTy = RetE->getType();
-  // If RetE is not a message expression, just return its type.
-  // If RetE is a message expression, return its type if it is something
-  // more specific than id.
- if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
- if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
- if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
- PT->isObjCClassType()) {
- // At this point we know the return type of the message expression is
- // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
- // is a call to a class method whose type we can resolve. In such
- // cases, promote the return type to XXX* (where XXX is the class).
- const ObjCInterfaceDecl *D = ME->getReceiverInterface();
- return !D ? RetTy :
- Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
- }
-
- return RetTy;
-}
-
-// Even when the function is inlined, we don't always get an exact model of it
-// with respect to the retain count checker. For example, we still need to stop
-// tracking the symbols that were marked with StopTrackingHard.
-void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
- const CallEvent &CallOrMsg,
- CheckerContext &C) const {
- ProgramStateRef state = C.getState();
-
- // Evaluate the effect of the arguments.
- for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
- if (Summ.getArg(idx) == StopTrackingHard) {
- SVal V = CallOrMsg.getArgSVal(idx);
- if (SymbolRef Sym = V.getAsLocSymbol()) {
- state = removeRefBinding(state, Sym);
- }
- }
- }
-
- // Evaluate the effect on the message receiver.
- const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
- if (MsgInvocation) {
- if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
- if (Summ.getReceiverEffect() == StopTrackingHard) {
- state = removeRefBinding(state, Sym);
- }
- }
- }
-
- // Consult the summary for the return value.
- RetEffect RE = Summ.getRetEffect();
- if (RE.getKind() == RetEffect::NoRetHard) {
- SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
- if (Sym)
- state = removeRefBinding(state, Sym);
- }
-
- C.addTransition(state);
-}
-
-static ProgramStateRef updateOutParameter(ProgramStateRef State,
- SVal ArgVal,
- ArgEffect Effect) {
- auto *ArgRegion = dyn_cast_or_null<TypedValueRegion>(ArgVal.getAsRegion());
- if (!ArgRegion)
- return State;
-
- QualType PointeeTy = ArgRegion->getValueType();
- if (!coreFoundation::isCFObjectRef(PointeeTy))
- return State;
-
- SVal PointeeVal = State->getSVal(ArgRegion);
- SymbolRef Pointee = PointeeVal.getAsLocSymbol();
- if (!Pointee)
- return State;
-
- switch (Effect) {
- case UnretainedOutParameter:
- State = setRefBinding(State, Pointee,
- RefVal::makeNotOwned(RetEffect::CF, PointeeTy));
- break;
- case RetainedOutParameter:
- // Do nothing. Retained out parameters will either point to a +1 reference
- // or NULL, but the way you check for failure differs depending on the API.
- // Consequently, we don't have a good way to track them yet.
- break;
-
- default:
- llvm_unreachable("only for out parameters");
- }
-
- return State;
-}
-
-void RetainCountChecker::checkSummary(const RetainSummary &Summ,
- const CallEvent &CallOrMsg,
- CheckerContext &C) const {
- ProgramStateRef state = C.getState();
-
- // Evaluate the effect of the arguments.
- RefVal::Kind hasErr = (RefVal::Kind) 0;
- SourceRange ErrorRange;
- SymbolRef ErrorSym = nullptr;
-
- for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
- SVal V = CallOrMsg.getArgSVal(idx);
-
- ArgEffect Effect = Summ.getArg(idx);
- if (Effect == RetainedOutParameter || Effect == UnretainedOutParameter) {
- state = updateOutParameter(state, V, Effect);
- } else if (SymbolRef Sym = V.getAsLocSymbol()) {
- if (const RefVal *T = getRefBinding(state, Sym)) {
- state = updateSymbol(state, Sym, *T, Effect, hasErr, C);
- if (hasErr) {
- ErrorRange = CallOrMsg.getArgSourceRange(idx);
- ErrorSym = Sym;
- break;
- }
- }
- }
- }
-
- // Evaluate the effect on the message receiver.
- bool ReceiverIsTracked = false;
- if (!hasErr) {
- const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
- if (MsgInvocation) {
- if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
- if (const RefVal *T = getRefBinding(state, Sym)) {
- ReceiverIsTracked = true;
- state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
- hasErr, C);
- if (hasErr) {
- ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
- ErrorSym = Sym;
- }
- }
- }
- }
- }
-
- // Process any errors.
- if (hasErr) {
- processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
- return;
- }
-
- // Consult the summary for the return value.
- RetEffect RE = Summ.getRetEffect();
-
- if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
- if (ReceiverIsTracked)
- RE = getSummaryManager(C).getObjAllocRetEffect();
- else
- RE = RetEffect::MakeNoRet();
- }
-
- switch (RE.getKind()) {
- default:
- llvm_unreachable("Unhandled RetEffect.");
-
- case RetEffect::NoRet:
- case RetEffect::NoRetHard:
- // No work necessary.
- break;
-
- case RetEffect::OwnedSymbol: {
- SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
- if (!Sym)
- break;
-
- // Use the result type from the CallEvent as it automatically adjusts
- // for methods/functions that return references.
- QualType ResultTy = CallOrMsg.getResultType();
- state = setRefBinding(state, Sym, RefVal::makeOwned(RE.getObjKind(),
- ResultTy));
-
- // FIXME: Add a flag to the checker where allocations are assumed to
- // *not* fail.
- break;
- }
-
- case RetEffect::GCNotOwnedSymbol:
- case RetEffect::NotOwnedSymbol: {
- const Expr *Ex = CallOrMsg.getOriginExpr();
- SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
- if (!Sym)
- break;
- assert(Ex);
- // Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
- QualType ResultTy = GetReturnType(Ex, C.getASTContext());
- state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
- ResultTy));
- break;
- }
- }
-
- // This check is actually necessary; otherwise the statement builder thinks
- // we've hit a previously-found path.
- // Normally addTransition takes care of this, but we want the node pointer.
- ExplodedNode *NewNode;
- if (state == C.getState()) {
- NewNode = C.getPredecessor();
- } else {
- NewNode = C.addTransition(state);
- }
-
- // Annotate the node with summary we used.
- if (NewNode) {
- // FIXME: This is ugly. See checkEndAnalysis for why it's necessary.
- if (ShouldResetSummaryLog) {
- SummaryLog.clear();
- ShouldResetSummaryLog = false;
- }
- SummaryLog[NewNode] = &Summ;
- }
-}
-
-ProgramStateRef
-RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
- RefVal V, ArgEffect E, RefVal::Kind &hasErr,
- CheckerContext &C) const {
- // In GC mode [... release] and [... retain] do nothing.
- // In ARC mode they shouldn't exist at all, but we just ignore them.
- bool IgnoreRetainMsg = C.isObjCGCEnabled();
- if (!IgnoreRetainMsg)
- IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
-
- switch (E) {
- default:
- break;
- case IncRefMsg:
- E = IgnoreRetainMsg ? DoNothing : IncRef;
- break;
- case DecRefMsg:
- E = IgnoreRetainMsg ? DoNothing : DecRef;
- break;
- case DecRefMsgAndStopTrackingHard:
- E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard;
- break;
- case MakeCollectable:
- E = C.isObjCGCEnabled() ? DecRef : DoNothing;
- break;
- }
-
- // Handle all use-after-releases.
- if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
- V = V ^ RefVal::ErrorUseAfterRelease;
- hasErr = V.getKind();
- return setRefBinding(state, sym, V);
- }
-
- switch (E) {
- case DecRefMsg:
- case IncRefMsg:
- case MakeCollectable:
- case DecRefMsgAndStopTrackingHard:
- llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
-
- case UnretainedOutParameter:
- case RetainedOutParameter:
- llvm_unreachable("Applies to pointer-to-pointer parameters, which should "
- "not have ref state.");
-
- case Dealloc:
- // Any use of -dealloc in GC is *bad*.
- if (C.isObjCGCEnabled()) {
- V = V ^ RefVal::ErrorDeallocGC;
- hasErr = V.getKind();
- break;
- }
-
- switch (V.getKind()) {
- default:
- llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
- case RefVal::Owned:
- // The object immediately transitions to the released state.
- V = V ^ RefVal::Released;
- V.clearCounts();
- return setRefBinding(state, sym, V);
- case RefVal::NotOwned:
- V = V ^ RefVal::ErrorDeallocNotOwned;
- hasErr = V.getKind();
- break;
- }
- break;
-
- case MayEscape:
- if (V.getKind() == RefVal::Owned) {
- V = V ^ RefVal::NotOwned;
- break;
- }
-
- // Fall-through.
-
- case DoNothing:
- return state;
-
- case Autorelease:
- if (C.isObjCGCEnabled())
- return state;
- // Update the autorelease counts.
- V = V.autorelease();
- break;
-
- case StopTracking:
- case StopTrackingHard:
- return removeRefBinding(state, sym);
-
- case IncRef:
- switch (V.getKind()) {
- default:
- llvm_unreachable("Invalid RefVal state for a retain.");
- case RefVal::Owned:
- case RefVal::NotOwned:
- V = V + 1;
- break;
- case RefVal::Released:
- // Non-GC cases are handled above.
- assert(C.isObjCGCEnabled());
- V = (V ^ RefVal::Owned) + 1;
- break;
- }
- break;
-
- case DecRef:
- case DecRefBridgedTransferred:
- case DecRefAndStopTrackingHard:
- switch (V.getKind()) {
- default:
- // case 'RefVal::Released' handled above.
- llvm_unreachable("Invalid RefVal state for a release.");
-
- case RefVal::Owned:
- assert(V.getCount() > 0);
- if (V.getCount() == 1) {
- if (E == DecRefBridgedTransferred ||
- V.getIvarAccessHistory() ==
- RefVal::IvarAccessHistory::AccessedDirectly)
- V = V ^ RefVal::NotOwned;
- else
- V = V ^ RefVal::Released;
- } else if (E == DecRefAndStopTrackingHard) {
- return removeRefBinding(state, sym);
- }
-
- V = V - 1;
- break;
-
- case RefVal::NotOwned:
- if (V.getCount() > 0) {
- if (E == DecRefAndStopTrackingHard)
- return removeRefBinding(state, sym);
- V = V - 1;
- } else if (V.getIvarAccessHistory() ==
- RefVal::IvarAccessHistory::AccessedDirectly) {
-        // Assume that the instance variable was holding on to the object at
-        // +1, and we just didn't know.
- if (E == DecRefAndStopTrackingHard)
- return removeRefBinding(state, sym);
- V = V.releaseViaIvar() ^ RefVal::Released;
- } else {
- V = V ^ RefVal::ErrorReleaseNotOwned;
- hasErr = V.getKind();
- }
- break;
-
- case RefVal::Released:
- // Non-GC cases are handled above.
- assert(C.isObjCGCEnabled());
- V = V ^ RefVal::ErrorUseAfterRelease;
- hasErr = V.getKind();
- break;
- }
- break;
- }
- return setRefBinding(state, sym, V);
-}
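As a rough, self-contained illustration of the Owned-count bookkeeping in the DecRef case above (the enum, struct, and function names below are invented for the example and are not part of the checker):

    #include <cassert>

    // Toy model of a RefVal, covering only the Owned -> Released transition.
    enum class Kind { Owned, Released };
    struct Ref { Kind K; unsigned Count; };

    // A plain release of an owned reference: dropping the last count moves
    // the object to the Released state, mirroring the RefVal::Owned case above.
    Ref release(Ref V) {
      assert(V.K == Kind::Owned && V.Count > 0);
      if (V.Count == 1)
        V.K = Kind::Released;
      --V.Count;
      return V;
    }

    int main() {
      Ref V{Kind::Owned, 1};  // e.g. the result of a CFCreate-style call: +1, owned
      V = release(V);         // CFRelease: count drops to 0, state becomes Released
      assert(V.K == Kind::Released && V.Count == 0);
    }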
-
-void RetainCountChecker::processNonLeakError(ProgramStateRef St,
- SourceRange ErrorRange,
- RefVal::Kind ErrorKind,
- SymbolRef Sym,
- CheckerContext &C) const {
- // HACK: Ignore retain-count issues on values accessed through ivars,
- // because of cases like this:
- // [_contentView retain];
- // [_contentView removeFromSuperview];
- // [self addSubview:_contentView]; // invalidates 'self'
- // [_contentView release];
- if (const RefVal *RV = getRefBinding(St, Sym))
- if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
- return;
-
- ExplodedNode *N = C.generateErrorNode(St);
- if (!N)
- return;
-
- CFRefBug *BT;
- switch (ErrorKind) {
- default:
- llvm_unreachable("Unhandled error.");
- case RefVal::ErrorUseAfterRelease:
- if (!useAfterRelease)
- useAfterRelease.reset(new UseAfterRelease(this));
- BT = useAfterRelease.get();
- break;
- case RefVal::ErrorReleaseNotOwned:
- if (!releaseNotOwned)
- releaseNotOwned.reset(new BadRelease(this));
- BT = releaseNotOwned.get();
- break;
- case RefVal::ErrorDeallocGC:
- if (!deallocGC)
- deallocGC.reset(new DeallocGC(this));
- BT = deallocGC.get();
- break;
- case RefVal::ErrorDeallocNotOwned:
- if (!deallocNotOwned)
- deallocNotOwned.reset(new DeallocNotOwned(this));
- BT = deallocNotOwned.get();
- break;
- }
-
- assert(BT);
- auto report = std::unique_ptr<BugReport>(
- new CFRefReport(*BT, C.getASTContext().getLangOpts(), C.isObjCGCEnabled(),
- SummaryLog, N, Sym));
- report->addRange(ErrorRange);
- C.emitReport(std::move(report));
-}
-
-//===----------------------------------------------------------------------===//
-// Handle the return values of retain-count-related functions.
-//===----------------------------------------------------------------------===//
-
-bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
- // Get the callee. We're only interested in simple C functions.
- ProgramStateRef state = C.getState();
- const FunctionDecl *FD = C.getCalleeDecl(CE);
- if (!FD)
- return false;
-
- IdentifierInfo *II = FD->getIdentifier();
- if (!II)
- return false;
-
- // For now, we're only handling the functions that return aliases of their
- // arguments: CFRetain and CFMakeCollectable (and their families).
- // Eventually we should add other functions we can model entirely,
- // such as CFRelease, which don't invalidate their arguments or globals.
- if (CE->getNumArgs() != 1)
- return false;
-
- // Get the name of the function.
- StringRef FName = II->getName();
- FName = FName.substr(FName.find_first_not_of('_'));
-
- // See if it's one of the specific functions we know how to eval.
- bool canEval = false;
- // See if the function has 'rc_ownership_trusted_implementation'
- // annotate attribute. If it does, we will not inline it.
- bool hasTrustedImplementationAnnotation = false;
-
- QualType ResultTy = CE->getCallReturnType(C.getASTContext());
- if (ResultTy->isObjCIdType()) {
- // Handle: id NSMakeCollectable(CFTypeRef)
- canEval = II->isStr("NSMakeCollectable");
- } else if (ResultTy->isPointerType()) {
- // Handle: (CF|CG|CV)Retain
- // CFAutorelease
- // CFMakeCollectable
- // It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
- if (cocoa::isRefType(ResultTy, "CF", FName) ||
- cocoa::isRefType(ResultTy, "CG", FName) ||
- cocoa::isRefType(ResultTy, "CV", FName)) {
- canEval = isRetain(FD, FName) || isAutorelease(FD, FName) ||
- isMakeCollectable(FD, FName);
- } else {
- if (FD->getDefinition()) {
- canEval = isTrustedReferenceCountImplementation(FD->getDefinition());
- hasTrustedImplementationAnnotation = canEval;
- }
- }
- }
-
- if (!canEval)
- return false;
-
- // Bind the return value.
- const LocationContext *LCtx = C.getLocationContext();
- SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
- if (RetVal.isUnknown() ||
- (hasTrustedImplementationAnnotation && !ResultTy.isNull())) {
- // If the receiver is unknown or the function has
- // 'rc_ownership_trusted_implementation' annotate attribute, conjure a
- // return value.
- SValBuilder &SVB = C.getSValBuilder();
- RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
- }
- state = state->BindExpr(CE, LCtx, RetVal, false);
-
- // FIXME: This should not be necessary, but otherwise the argument seems to be
- // considered alive during the next statement.
- if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
- // Save the refcount status of the argument.
- SymbolRef Sym = RetVal.getAsLocSymbol();
- const RefVal *Binding = nullptr;
- if (Sym)
- Binding = getRefBinding(state, Sym);
-
- // Invalidate the argument region.
- state = state->invalidateRegions(
- ArgRegion, CE, C.blockCount(), LCtx,
- /*CausesPointerEscape*/ hasTrustedImplementationAnnotation);
-
- // Restore the refcount status of the argument.
- if (Binding)
- state = setRefBinding(state, Sym, *Binding);
- }
-
- C.addTransition(state);
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// Handle return statements.
-//===----------------------------------------------------------------------===//
-
-void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
- CheckerContext &C) const {
-
- // Only adjust the reference count if this is the top-level call frame,
- // and not the result of inlining. In the future, we should do
- // better checking even for inlined calls, and see if they match
- // with their expected semantics (e.g., the method should return a retained
- // object, etc.).
- if (!C.inTopFrame())
- return;
-
- const Expr *RetE = S->getRetValue();
- if (!RetE)
- return;
-
- ProgramStateRef state = C.getState();
- SymbolRef Sym =
- state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
- if (!Sym)
- return;
-
- // Get the reference count binding (if any).
- const RefVal *T = getRefBinding(state, Sym);
- if (!T)
- return;
-
- // Change the reference count.
- RefVal X = *T;
-
- switch (X.getKind()) {
- case RefVal::Owned: {
- unsigned cnt = X.getCount();
- assert(cnt > 0);
- X.setCount(cnt - 1);
- X = X ^ RefVal::ReturnedOwned;
- break;
- }
-
- case RefVal::NotOwned: {
- unsigned cnt = X.getCount();
- if (cnt) {
- X.setCount(cnt - 1);
- X = X ^ RefVal::ReturnedOwned;
- }
- else {
- X = X ^ RefVal::ReturnedNotOwned;
- }
- break;
- }
-
- default:
- return;
- }
-
- // Update the binding.
- state = setRefBinding(state, Sym, X);
- ExplodedNode *Pred = C.addTransition(state);
-
- // At this point we have updated the state properly.
- // Everything after this is merely checking to see if the return value has
- // been over- or under-retained.
-
- // Did we cache out?
- if (!Pred)
- return;
-
- // Update the autorelease counts.
- static CheckerProgramPointTag AutoreleaseTag(this, "Autorelease");
- state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X);
-
- // Did we cache out?
- if (!state)
- return;
-
- // Get the updated binding.
- T = getRefBinding(state, Sym);
- assert(T);
- X = *T;
-
- // Consult the summary of the enclosing method.
- RetainSummaryManager &Summaries = getSummaryManager(C);
- const Decl *CD = &Pred->getCodeDecl();
- RetEffect RE = RetEffect::MakeNoRet();
-
- // FIXME: What is the convention for blocks? Is there one?
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
- const RetainSummary *Summ = Summaries.getMethodSummary(MD);
- RE = Summ->getRetEffect();
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
- if (!isa<CXXMethodDecl>(FD)) {
- const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
- RE = Summ->getRetEffect();
- }
- }
-
- checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
-}
-
-void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
- CheckerContext &C,
- ExplodedNode *Pred,
- RetEffect RE, RefVal X,
- SymbolRef Sym,
- ProgramStateRef state) const {
- // HACK: Ignore retain-count issues on values accessed through ivars,
- // because of cases like this:
- // [_contentView retain];
- // [_contentView removeFromSuperview];
- // [self addSubview:_contentView]; // invalidates 'self'
- // [_contentView release];
- if (X.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
- return;
-
- // Any leaks or other errors?
- if (X.isReturnedOwned() && X.getCount() == 0) {
- if (RE.getKind() != RetEffect::NoRet) {
- bool hasError = false;
- if (C.isObjCGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
- // Things are more complicated with garbage collection. If the
-        // returned object is supposed to be an Objective-C object, we have
- // a leak (as the caller expects a GC'ed object) because no
- // method should return ownership unless it returns a CF object.
- hasError = true;
- X = X ^ RefVal::ErrorGCLeakReturned;
- }
- else if (!RE.isOwned()) {
- // Either we are using GC and the returned object is a CF type
- // or we aren't using GC. In either case, we expect that the
- // enclosing method is expected to return ownership.
- hasError = true;
- X = X ^ RefVal::ErrorLeakReturned;
- }
-
- if (hasError) {
- // Generate an error node.
- state = setRefBinding(state, Sym, X);
-
- static CheckerProgramPointTag ReturnOwnLeakTag(this, "ReturnsOwnLeak");
- ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
- if (N) {
- const LangOptions &LOpts = C.getASTContext().getLangOpts();
- bool GCEnabled = C.isObjCGCEnabled();
- C.emitReport(std::unique_ptr<BugReport>(new CFRefLeakReport(
- *getLeakAtReturnBug(LOpts, GCEnabled), LOpts, GCEnabled,
- SummaryLog, N, Sym, C, IncludeAllocationLine)));
- }
- }
- }
- } else if (X.isReturnedNotOwned()) {
- if (RE.isOwned()) {
- if (X.getIvarAccessHistory() ==
- RefVal::IvarAccessHistory::AccessedDirectly) {
- // Assume the method was trying to transfer a +1 reference from a
- // strong ivar to the caller.
- state = setRefBinding(state, Sym,
- X.releaseViaIvar() ^ RefVal::ReturnedOwned);
- } else {
- // Trying to return a not owned object to a caller expecting an
- // owned object.
- state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
-
- static CheckerProgramPointTag
- ReturnNotOwnedTag(this, "ReturnNotOwnedForOwned");
-
- ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
- if (N) {
- if (!returnNotOwnedForOwned)
- returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
-
- C.emitReport(std::unique_ptr<BugReport>(new CFRefReport(
- *returnNotOwnedForOwned, C.getASTContext().getLangOpts(),
- C.isObjCGCEnabled(), SummaryLog, N, Sym)));
- }
- }
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Check various ways a symbol can be invalidated.
-//===----------------------------------------------------------------------===//
-
-void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
- CheckerContext &C) const {
- // Are we storing to something that causes the value to "escape"?
- bool escapes = true;
-
-  // A value escapes in four possible cases (this may change):
-  //
-  // (1) we are binding to something that is not a memory region.
-  // (2) we are binding to a memregion that does not have stack storage
-  // (3) we are binding to a memregion with stack storage that the store
-  //     does not understand.
-  // (4) we are binding to a memregion with stack storage that is not a plain
-  //     variable (e.g. a struct field), which we do not currently model
-  //     (see the "Case 4" note below).
- ProgramStateRef state = C.getState();
-
- if (Optional<loc::MemRegionVal> regionLoc = loc.getAs<loc::MemRegionVal>()) {
- escapes = !regionLoc->getRegion()->hasStackStorage();
-
- if (!escapes) {
- // To test (3), generate a new state with the binding added. If it is
- // the same state, then it escapes (since the store cannot represent
- // the binding).
- // Do this only if we know that the store is not supposed to generate the
- // same state.
- SVal StoredVal = state->getSVal(regionLoc->getRegion());
- if (StoredVal != val)
- escapes = (state == (state->bindLoc(*regionLoc, val, C.getLocationContext())));
- }
- if (!escapes) {
- // Case 4: We do not currently model what happens when a symbol is
- // assigned to a struct field, so be conservative here and let the symbol
- // go. TODO: This could definitely be improved upon.
- escapes = !isa<VarRegion>(regionLoc->getRegion());
- }
- }
-
- // If we are storing the value into an auto function scope variable annotated
- // with (__attribute__((cleanup))), stop tracking the value to avoid leak
- // false positives.
- if (const VarRegion *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
- const VarDecl *VD = LVR->getDecl();
- if (VD->hasAttr<CleanupAttr>()) {
- escapes = true;
- }
- }
-
- // If our store can represent the binding and we aren't storing to something
- // that doesn't have local storage then just return and have the simulation
- // state continue as is.
- if (!escapes)
- return;
-
- // Otherwise, find all symbols referenced by 'val' that we are tracking
- // and stop tracking them.
- state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
- C.addTransition(state);
-}
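To make the escape cases enumerated in checkBind concrete, here is a small piece of client code of the sort the checker analyzes (not analyzer code; the names are invented for illustration):

    // A region without stack storage: binding a tracked pointer here escapes it.
    void *GlobalSlot = nullptr;

    void client(void *p) {
      void *local = p;   // binding to a stack VarRegion: 'p' stays tracked
      GlobalSlot = p;    // binding to non-stack storage (case 2): 'p' escapes
      (void)local;
    }

    int main() {
      int x = 0;
      client(&x);
    }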
-
-ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
- SVal Cond,
- bool Assumption) const {
- // FIXME: We may add to the interface of evalAssume the list of symbols
- // whose assumptions have changed. For now we just iterate through the
- // bindings and check if any of the tracked symbols are NULL. This isn't
- // too bad since the number of symbols we will track in practice are
- // probably small and evalAssume is only called at branches and a few
- // other places.
- RefBindingsTy B = state->get<RefBindings>();
-
- if (B.isEmpty())
- return state;
-
- bool changed = false;
- RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
-
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
-    // If the symbol is known to be null, stop tracking it.
- ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
- if (AllocFailed.isConstrainedTrue()) {
- changed = true;
- B = RefBFactory.remove(B, I.getKey());
- }
- }
-
- if (changed)
- state = state->set<RefBindings>(B);
-
- return state;
-}
-
-ProgramStateRef
-RetainCountChecker::checkRegionChanges(ProgramStateRef state,
- const InvalidatedSymbols *invalidated,
- ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
- const LocationContext *LCtx,
- const CallEvent *Call) const {
- if (!invalidated)
- return state;
-
- llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
- for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
- E = ExplicitRegions.end(); I != E; ++I) {
- if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
- WhitelistedSymbols.insert(SR->getSymbol());
- }
-
- for (InvalidatedSymbols::const_iterator I=invalidated->begin(),
- E = invalidated->end(); I!=E; ++I) {
- SymbolRef sym = *I;
- if (WhitelistedSymbols.count(sym))
- continue;
- // Remove any existing reference-count binding.
- state = removeRefBinding(state, sym);
- }
- return state;
-}
-
-//===----------------------------------------------------------------------===//
-// Handle dead symbols and end-of-path.
-//===----------------------------------------------------------------------===//
-
-ProgramStateRef
-RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
- ExplodedNode *Pred,
- const ProgramPointTag *Tag,
- CheckerContext &Ctx,
- SymbolRef Sym, RefVal V) const {
- unsigned ACnt = V.getAutoreleaseCount();
-
- // No autorelease counts? Nothing to be done.
- if (!ACnt)
- return state;
-
- assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
- unsigned Cnt = V.getCount();
-
- // FIXME: Handle sending 'autorelease' to already released object.
-
- if (V.getKind() == RefVal::ReturnedOwned)
- ++Cnt;
-
- // If we would over-release here, but we know the value came from an ivar,
- // assume it was a strong ivar that's just been relinquished.
- if (ACnt > Cnt &&
- V.getIvarAccessHistory() == RefVal::IvarAccessHistory::AccessedDirectly) {
- V = V.releaseViaIvar();
- --ACnt;
- }
-
- if (ACnt <= Cnt) {
- if (ACnt == Cnt) {
- V.clearCounts();
- if (V.getKind() == RefVal::ReturnedOwned)
- V = V ^ RefVal::ReturnedNotOwned;
- else
- V = V ^ RefVal::NotOwned;
- } else {
- V.setCount(V.getCount() - ACnt);
- V.setAutoreleaseCount(0);
- }
- return setRefBinding(state, Sym, V);
- }
-
- // HACK: Ignore retain-count issues on values accessed through ivars,
- // because of cases like this:
- // [_contentView retain];
- // [_contentView removeFromSuperview];
- // [self addSubview:_contentView]; // invalidates 'self'
- // [_contentView release];
- if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
- return state;
-
-  // Whoa! More autorelease counts than retain counts left.
- // Emit hard error.
- V = V ^ RefVal::ErrorOverAutorelease;
- state = setRefBinding(state, Sym, V);
-
- ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
- if (N) {
- SmallString<128> sbuf;
- llvm::raw_svector_ostream os(sbuf);
- os << "Object was autoreleased ";
- if (V.getAutoreleaseCount() > 1)
- os << V.getAutoreleaseCount() << " times but the object ";
- else
- os << "but ";
- os << "has a +" << V.getCount() << " retain count";
-
- if (!overAutorelease)
- overAutorelease.reset(new OverAutorelease(this));
-
- const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- Ctx.emitReport(std::unique_ptr<BugReport>(
- new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
- SummaryLog, N, Sym, os.str())));
- }
-
- return nullptr;
-}
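A minimal sketch of the count comparison performed above: pending autoreleases are balanced against the retain count, plus one if the value is being returned as owned, and anything beyond that budget is the over-autorelease error. The helper below is invented for illustration and is not part of the checker.

    #include <cassert>

    struct Counts { unsigned Retain; unsigned Autorelease; };

    // True if the pending autoreleases exceed what the retain count (plus the
    // +1 of a returned-owned value) can absorb.
    bool overAutoreleases(Counts C, bool ReturnedOwned) {
      unsigned Budget = C.Retain + (ReturnedOwned ? 1 : 0);
      return C.Autorelease > Budget;
    }

    int main() {
      assert(!overAutoreleases({1, 1}, false)); // one retain, one autorelease: balanced
      assert(overAutoreleases({0, 1}, false));  // autoreleased with no retain: hard error
      assert(!overAutoreleases({0, 1}, true));  // returning the value as owned covers it
    }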
-
-ProgramStateRef
-RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
- SymbolRef sid, RefVal V,
- SmallVectorImpl<SymbolRef> &Leaked) const {
- bool hasLeak;
-
- // HACK: Ignore retain-count issues on values accessed through ivars,
- // because of cases like this:
- // [_contentView retain];
- // [_contentView removeFromSuperview];
- // [self addSubview:_contentView]; // invalidates 'self'
- // [_contentView release];
- if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
- hasLeak = false;
- else if (V.isOwned())
- hasLeak = true;
- else if (V.isNotOwned() || V.isReturnedOwned())
- hasLeak = (V.getCount() > 0);
- else
- hasLeak = false;
-
- if (!hasLeak)
- return removeRefBinding(state, sid);
-
- Leaked.push_back(sid);
- return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
-}
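Restating the leak predicate above as a standalone sketch (it ignores the ivar-access exception; the enum and function are invented for the example):

    #include <cassert>

    enum class K { Owned, NotOwned, ReturnedOwned, Released };

    // A symbol that dies while Owned, or while NotOwned/ReturnedOwned with a
    // positive count, is flagged as a leak.
    bool isLeakAtDeath(K Kind, unsigned Count) {
      if (Kind == K::Owned)
        return true;
      if (Kind == K::NotOwned || Kind == K::ReturnedOwned)
        return Count > 0;
      return false;
    }

    int main() {
      assert(isLeakAtDeath(K::Owned, 0));         // owned value dies untouched: leak
      assert(!isLeakAtDeath(K::NotOwned, 0));     // +0 value dies: fine
      assert(isLeakAtDeath(K::ReturnedOwned, 1)); // returned owned with an extra +1: leak
      assert(!isLeakAtDeath(K::Released, 0));     // already released: not a leak
    }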
-
-ExplodedNode *
-RetainCountChecker::processLeaks(ProgramStateRef state,
- SmallVectorImpl<SymbolRef> &Leaked,
- CheckerContext &Ctx,
- ExplodedNode *Pred) const {
- // Generate an intermediate node representing the leak point.
- ExplodedNode *N = Ctx.addTransition(state, Pred);
-
- if (N) {
- for (SmallVectorImpl<SymbolRef>::iterator
- I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
-
- const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
- bool GCEnabled = Ctx.isObjCGCEnabled();
- CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
- : getLeakAtReturnBug(LOpts, GCEnabled);
- assert(BT && "BugType not initialized.");
-
- Ctx.emitReport(std::unique_ptr<BugReport>(
- new CFRefLeakReport(*BT, LOpts, GCEnabled, SummaryLog, N, *I, Ctx,
- IncludeAllocationLine)));
- }
- }
-
- return N;
-}
-
-void RetainCountChecker::checkBeginFunction(CheckerContext &Ctx) const {
- if (!Ctx.inTopFrame())
- return;
-
- const LocationContext *LCtx = Ctx.getLocationContext();
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(LCtx->getDecl());
-
- if (!FD || isTrustedReferenceCountImplementation(FD))
- return;
-
- ProgramStateRef state = Ctx.getState();
-
- const RetainSummary *FunctionSummary = getSummaryManager(Ctx).getFunctionSummary(FD);
- ArgEffects CalleeSideArgEffects = FunctionSummary->getArgEffects();
-
- for (unsigned idx = 0, e = FD->getNumParams(); idx != e; ++idx) {
- const ParmVarDecl *Param = FD->getParamDecl(idx);
- SymbolRef Sym = state->getSVal(state->getRegion(Param, LCtx)).getAsSymbol();
-
- QualType Ty = Param->getType();
- const ArgEffect *AE = CalleeSideArgEffects.lookup(idx);
- if (AE && *AE == DecRef && isGeneralizedObjectRef(Ty))
- state = setRefBinding(state, Sym, RefVal::makeOwned(RetEffect::ObjKind::Generalized, Ty));
- else if (isGeneralizedObjectRef(Ty))
- state = setRefBinding(state, Sym, RefVal::makeNotOwned(RetEffect::ObjKind::Generalized, Ty));
- }
-
- Ctx.addTransition(state);
-}
-
-void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
- CheckerContext &Ctx) const {
- ProgramStateRef state = Ctx.getState();
- RefBindingsTy B = state->get<RefBindings>();
- ExplodedNode *Pred = Ctx.getPredecessor();
-
- // Don't process anything within synthesized bodies.
- const LocationContext *LCtx = Pred->getLocationContext();
- if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized()) {
- assert(!LCtx->inTopFrame());
- return;
- }
-
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- state = handleAutoreleaseCounts(state, Pred, /*Tag=*/nullptr, Ctx,
- I->first, I->second);
- if (!state)
- return;
- }
-
- // If the current LocationContext has a parent, don't check for leaks.
- // We will do that later.
- // FIXME: we should instead check for imbalances of the retain/releases,
- // and suggest annotations.
- if (LCtx->getParent())
- return;
-
- B = state->get<RefBindings>();
- SmallVector<SymbolRef, 10> Leaked;
-
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
- state = handleSymbolDeath(state, I->first, I->second, Leaked);
-
- processLeaks(state, Leaked, Ctx, Pred);
-}
-
-const ProgramPointTag *
-RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
- const CheckerProgramPointTag *&tag = DeadSymbolTags[sym];
- if (!tag) {
- SmallString<64> buf;
- llvm::raw_svector_ostream out(buf);
- out << "Dead Symbol : ";
- sym->dumpToStream(out);
- tag = new CheckerProgramPointTag(this, out.str());
- }
- return tag;
-}
-
-void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
- CheckerContext &C) const {
- ExplodedNode *Pred = C.getPredecessor();
-
- ProgramStateRef state = C.getState();
- RefBindingsTy B = state->get<RefBindings>();
- SmallVector<SymbolRef, 10> Leaked;
-
- // Update counts from autorelease pools
- for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
- SymbolRef Sym = *I;
- if (const RefVal *T = B.lookup(Sym)){
- // Use the symbol as the tag.
- // FIXME: This might not be as unique as we would like.
- const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
- state = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T);
- if (!state)
- return;
-
- // Fetch the new reference count from the state, and use it to handle
- // this symbol.
- state = handleSymbolDeath(state, *I, *getRefBinding(state, Sym), Leaked);
- }
- }
-
- if (Leaked.empty()) {
- C.addTransition(state);
- return;
- }
-
- Pred = processLeaks(state, Leaked, C, Pred);
-
- // Did we cache out?
- if (!Pred)
- return;
-
- // Now generate a new node that nukes the old bindings.
- // The only bindings left at this point are the leaked symbols.
- RefBindingsTy::Factory &F = state->get_context<RefBindings>();
- B = state->get<RefBindings>();
-
- for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
- E = Leaked.end();
- I != E; ++I)
- B = F.remove(B, *I);
-
- state = state->set<RefBindings>(B);
- C.addTransition(state, Pred);
-}
-
-void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const {
-
- RefBindingsTy B = State->get<RefBindings>();
-
- if (B.isEmpty())
- return;
-
- Out << Sep << NL;
-
- for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- Out << I->first << " : ";
- I->second.print(Out);
- Out << NL;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Checker registration.
-//===----------------------------------------------------------------------===//
-
-void ento::registerRetainCountChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
-}
-
-//===----------------------------------------------------------------------===//
-// Implementation of the CallEffects API.
-//===----------------------------------------------------------------------===//
-
-namespace clang {
-namespace ento {
-namespace objc_retain {
-
-// This is a bit gross, but it allows us to populate CallEffects without
-// creating a bunch of accessors. This kind of code is very localized, so the
-// damage of this macro is limited.
-#define createCallEffect(D, KIND)\
- ASTContext &Ctx = D->getASTContext();\
- LangOptions L = Ctx.getLangOpts();\
- RetainSummaryManager M(Ctx, L.GCOnly, L.ObjCAutoRefCount);\
- const RetainSummary *S = M.get ## KIND ## Summary(D);\
- CallEffects CE(S->getRetEffect());\
- CE.Receiver = S->getReceiverEffect();\
- unsigned N = D->param_size();\
- for (unsigned i = 0; i < N; ++i) {\
- CE.Args.push_back(S->getArg(i));\
- }
-
-CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
- createCallEffect(MD, Method);
- return CE;
-}
-
-CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
- createCallEffect(FD, Function);
- return CE;
-}
-
-#undef createCallEffect
-
-} // end namespace objc_retain
-} // end namespace ento
-} // end namespace clang
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
new file mode 100644
index 000000000000..0652af856643
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -0,0 +1,1547 @@
+//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa on Mac OS X.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RetainCountChecker.h"
+
+using namespace clang;
+using namespace ento;
+using namespace retaincountchecker;
+using llvm::StrInStrNoCase;
+
+REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
+
+namespace clang {
+namespace ento {
+namespace retaincountchecker {
+
+const RefVal *getRefBinding(ProgramStateRef State, SymbolRef Sym) {
+ return State->get<RefBindings>(Sym);
+}
+
+ProgramStateRef setRefBinding(ProgramStateRef State, SymbolRef Sym,
+ RefVal Val) {
+ assert(Sym != nullptr);
+ return State->set<RefBindings>(Sym, Val);
+}
+
+ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
+ return State->remove<RefBindings>(Sym);
+}
+
+class UseAfterRelease : public RefCountBug {
+public:
+ UseAfterRelease(const CheckerBase *checker)
+ : RefCountBug(checker, "Use-after-release") {}
+
+ const char *getDescription() const override {
+ return "Reference-counted object is used after it is released";
+ }
+};
+
+class BadRelease : public RefCountBug {
+public:
+ BadRelease(const CheckerBase *checker) : RefCountBug(checker, "Bad release") {}
+
+ const char *getDescription() const override {
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ }
+};
+
+class DeallocNotOwned : public RefCountBug {
+public:
+ DeallocNotOwned(const CheckerBase *checker)
+ : RefCountBug(checker, "-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const override {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+};
+
+class OverAutorelease : public RefCountBug {
+public:
+ OverAutorelease(const CheckerBase *checker)
+ : RefCountBug(checker, "Object autoreleased too many times") {}
+
+ const char *getDescription() const override {
+ return "Object autoreleased too many times";
+ }
+};
+
+class ReturnedNotOwnedForOwned : public RefCountBug {
+public:
+ ReturnedNotOwnedForOwned(const CheckerBase *checker)
+ : RefCountBug(checker, "Method should return an owned object") {}
+
+ const char *getDescription() const override {
+ return "Object with a +0 retain count returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+};
+
+class Leak : public RefCountBug {
+public:
+ Leak(const CheckerBase *checker, StringRef name) : RefCountBug(checker, name) {
+ // Leaks should not be reported if they are post-dominated by a sink.
+ setSuppressOnSink(true);
+ }
+
+ const char *getDescription() const override { return ""; }
+
+ bool isLeak() const override { return true; }
+};
+
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+void RefVal::print(raw_ostream &Out) const {
+ if (!T.isNull())
+ Out << "Tracked " << T.getAsString() << " | ";
+
+ switch (getKind()) {
+ default: llvm_unreachable("Invalid RefVal kind");
+ case Owned: {
+ Out << "Owned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case NotOwned: {
+ Out << "NotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedOwned: {
+ Out << "ReturnedOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedNotOwned: {
+ Out << "ReturnedNotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case Released:
+ Out << "Released";
+ break;
+
+ case ErrorDeallocNotOwned:
+ Out << "-dealloc (not-owned)";
+ break;
+
+ case ErrorLeak:
+ Out << "Leaked";
+ break;
+
+ case ErrorLeakReturned:
+ Out << "Leaked (Bad naming)";
+ break;
+
+ case ErrorUseAfterRelease:
+ Out << "Use-After-Release [ERROR]";
+ break;
+
+ case ErrorReleaseNotOwned:
+ Out << "Release of Not-Owned [ERROR]";
+ break;
+
+ case RefVal::ErrorOverAutorelease:
+ Out << "Over-autoreleased";
+ break;
+
+ case RefVal::ErrorReturnedNotOwned:
+ Out << "Non-owned object returned instead of owned";
+ break;
+ }
+
+ switch (getIvarAccessHistory()) {
+ case IvarAccessHistory::None:
+ break;
+ case IvarAccessHistory::AccessedDirectly:
+ Out << " [direct ivar access]";
+ break;
+ case IvarAccessHistory::ReleasedAfterDirectAccess:
+ Out << " [released after direct ivar access]";
+ }
+
+ if (ACnt) {
+ Out << " [autorelease -" << ACnt << ']';
+ }
+}
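For orientation, a binding rendered by this routine (as surfaced through RetainCountChecker::printState) looks roughly like the line below; the symbol spelling on the left is only an approximation.

    conj_$2{NSString *} : Tracked NSString * | Owned (+ 1) [autorelease -1]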
+
+namespace {
+class StopTrackingCallback final : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) override {
+ state = state->remove<RefBindings>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Handle statements that may have an effect on refcounts.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+
+ // Scan the BlockDecRefExprs for any object the retain count checker
+ // may be tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ // FIXME: For now we invalidate the tracking of all symbols passed to blocks
+ // via captured variables, even though captured variables result in a copy
+ // and in implicit increment/decrement of a retain count.
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = I.getCapturedRegion();
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state = state->scanReachableSymbols<StopTrackingCallback>(Regions).getState();
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
+ if (!BE)
+ return;
+
+ ArgEffect AE = ArgEffect(IncRef, ObjKind::ObjC);
+
+ switch (BE->getBridgeKind()) {
+ case OBC_Bridge:
+ // Do nothing.
+ return;
+ case OBC_BridgeRetained:
+ AE = AE.withKind(IncRef);
+ break;
+ case OBC_BridgeTransfer:
+ AE = AE.withKind(DecRefBridgedTransferred);
+ break;
+ }
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = C.getSVal(CE).getAsLocSymbol();
+ if (!Sym)
+ return;
+ const RefVal* T = getRefBinding(state, Sym);
+ if (!T)
+ return;
+
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, Sym, *T, AE, hasErr, C);
+
+ if (hasErr) {
+ // FIXME: If we get an error during a bridge cast, should we report it?
+ return;
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::processObjCLiterals(CheckerContext &C,
+ const Expr *Ex) const {
+ ProgramStateRef state = C.getState();
+ const ExplodedNode *pred = C.getPredecessor();
+ for (const Stmt *Child : Ex->children()) {
+ SVal V = pred->getSVal(Child);
+ if (SymbolRef sym = V.getAsSymbol())
+ if (const RefVal* T = getRefBinding(state, sym)) {
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, sym, *T,
+ ArgEffect(MayEscape, ObjKind::ObjC), hasErr, C);
+ if (hasErr) {
+ processNonLeakError(state, Child->getSourceRange(), hasErr, sym, C);
+ return;
+ }
+ }
+ }
+
+ // Return the object as autoreleased.
+ // RetEffect RE = RetEffect::MakeNotOwned(ObjKind::ObjC);
+ if (SymbolRef sym =
+ state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ state = setRefBinding(state, sym,
+ RefVal::makeNotOwned(ObjKind::ObjC, ResultTy));
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const {
+ // Apply the 'MayEscape' to all values.
+ processObjCLiterals(C, AL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const {
+ // Apply the 'MayEscape' to all keys and values.
+ processObjCLiterals(C, DL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
+ CheckerContext &C) const {
+ const ExplodedNode *Pred = C.getPredecessor();
+ ProgramStateRef State = Pred->getState();
+
+ if (SymbolRef Sym = Pred->getSVal(Ex).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ State = setRefBinding(State, Sym,
+ RefVal::makeNotOwned(ObjKind::ObjC, ResultTy));
+ }
+
+ C.addTransition(State);
+}
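+
+// Illustrative: @[x], @{k : v} and @(42) all produce values modeled as +0
+// (not owned); the array and dictionary literals additionally mark their
+// elements as MayEscape.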
+
+void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
+ CheckerContext &C) const {
+ Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
+ if (!IVarLoc)
+ return;
+
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol();
+ if (!Sym || !dyn_cast_or_null<ObjCIvarRegion>(Sym->getOriginRegion()))
+ return;
+
+ // Accessing an ivar directly is unusual. If we've done that, be more
+ // forgiving about what the surrounding code is allowed to do.
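+  // For example, after reading `_contentView` directly, a later
+  // `[_contentView release]` is tolerated, since the ivar may well have been
+  // holding the object at +1.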
+
+ QualType Ty = Sym->getType();
+ ObjKind Kind;
+ if (Ty->isObjCRetainableType())
+ Kind = ObjKind::ObjC;
+ else if (coreFoundation::isCFObjectRef(Ty))
+ Kind = ObjKind::CF;
+ else
+ return;
+
+ // If the value is already known to be nil, don't bother tracking it.
+ ConstraintManager &CMgr = State->getConstraintManager();
+ if (CMgr.isNull(State, Sym).isConstrainedTrue())
+ return;
+
+ if (const RefVal *RV = getRefBinding(State, Sym)) {
+ // If we've seen this symbol before, or we're only seeing it now because
+ // of something the analyzer has synthesized, don't do anything.
+ if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None ||
+ isSynthesizedAccessor(C.getStackFrame())) {
+ return;
+ }
+
+ // Note that this value has been loaded from an ivar.
+ C.addTransition(setRefBinding(State, Sym, RV->withIvarAccess()));
+ return;
+ }
+
+ RefVal PlusZero = RefVal::makeNotOwned(Kind, Ty);
+
+ // In a synthesized accessor, the effective retain count is +0.
+ if (isSynthesizedAccessor(C.getStackFrame())) {
+ C.addTransition(setRefBinding(State, Sym, PlusZero));
+ return;
+ }
+
+ State = setRefBinding(State, Sym, PlusZero.withIvarAccess());
+ C.addTransition(State);
+}
+
+void RetainCountChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+
+ // Leave null if no receiver.
+ QualType ReceiverType;
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+ if (MC->isInstanceMessage()) {
+ SVal ReceiverV = MC->getReceiverSVal();
+ if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
+ if (const RefVal *T = getRefBinding(C.getState(), Sym))
+ ReceiverType = T->getType();
+ }
+ }
+
+ const RetainSummary *Summ = Summaries.getSummary(Call, ReceiverType);
+
+ if (C.wasInlined) {
+ processSummaryOfInlined(*Summ, Call, C);
+ return;
+ }
+ checkSummary(*Summ, Call, C);
+}
+
+RefCountBug *
+RetainCountChecker::getLeakWithinFunctionBug(const LangOptions &LOpts) const {
+ if (!leakWithinFunction)
+ leakWithinFunction.reset(new Leak(this, "Leak"));
+ return leakWithinFunction.get();
+}
+
+RefCountBug *
+RetainCountChecker::getLeakAtReturnBug(const LangOptions &LOpts) const {
+ if (!leakAtReturn)
+ leakAtReturn.reset(new Leak(this, "Leak of returned object"));
+ return leakAtReturn.get();
+}
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetE, when
+/// invoking class methods we augment the return type to be that of
+/// a pointer to the class (as opposed to it just being 'id').
+// FIXME: We may be able to do this with related result types instead.
+// This function is probably overestimating.
+static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
+ QualType RetTy = RetE->getType();
+  // If RetE is not a message expression, just return its type.
+  // If RetE is a message expression, return its type only if it is something
+  // more specific than 'id'.
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+ if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ PT->isObjCClassType()) {
+ // At this point we know the return type of the message expression is
+ // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+ // is a call to a class method whose type we can resolve. In such
+ // cases, promote the return type to XXX* (where XXX is the class).
+ const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+ return !D ? RetTy :
+ Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+ }
+
+ return RetTy;
+}
+
+static Optional<RefVal> refValFromRetEffect(RetEffect RE,
+ QualType ResultTy) {
+ if (RE.isOwned()) {
+ return RefVal::makeOwned(RE.getObjKind(), ResultTy);
+ } else if (RE.notOwned()) {
+ return RefVal::makeNotOwned(RE.getObjKind(), ResultTy);
+ }
+
+ return None;
+}
+
+static bool isPointerToObject(QualType QT) {
+ QualType PT = QT->getPointeeType();
+ if (!PT.isNull())
+ if (PT->getAsCXXRecordDecl())
+ return true;
+ return false;
+}
+
+/// Whether the tracked value should be escaped on a given call.
+/// OSObjects are escaped when passed to void * / etc.
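+/// For example, an OSObject passed as a `void *` callback-context argument is
+/// treated as escaped, while passing it to a parameter typed as a pointer to
+/// a C++ class is not.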
+static bool shouldEscapeOSArgumentOnCall(const CallEvent &CE, unsigned ArgIdx,
+ const RefVal *TrackedValue) {
+ if (TrackedValue->getObjKind() != ObjKind::OS)
+ return false;
+ if (ArgIdx >= CE.parameters().size())
+ return false;
+ return !isPointerToObject(CE.parameters()[ArgIdx]->getType());
+}
+
+// Even when a function is inlined, its retain-count behavior is not always
+// modeled exactly. For example, we still need to stop tracking the symbols
+// that were marked with StopTrackingHard.
+void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ bool ShouldRemoveBinding = Summ.getArg(idx).getKind() == StopTrackingHard;
+ if (const RefVal *T = getRefBinding(state, Sym))
+ if (shouldEscapeOSArgumentOnCall(CallOrMsg, idx, T))
+ ShouldRemoveBinding = true;
+
+ if (ShouldRemoveBinding)
+ state = removeRefBinding(state, Sym);
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ if (const auto *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg)) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (Summ.getReceiverEffect().getKind() == StopTrackingHard) {
+ state = removeRefBinding(state, Sym);
+ }
+ }
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol()) {
+ if (RE.getKind() == RetEffect::NoRetHard)
+ state = removeRefBinding(state, Sym);
+ }
+
+ C.addTransition(state);
+}
+
+static bool shouldEscapeRegion(const MemRegion *R) {
+
+ // We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
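+  // For example, binding to a local stack variable keeps tracking, while
+  // binding to a struct field or a heap location lets the symbol escape.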
+ return !R->hasStackStorage() || !isa<VarRegion>(R);
+}
+
+static SmallVector<ProgramStateRef, 2>
+updateOutParameters(ProgramStateRef State, const RetainSummary &Summ,
+ const CallEvent &CE) {
+
+ SVal L = CE.getReturnValue();
+
+ // Splitting is required to support out parameters,
+ // as out parameters might be created only on the "success" branch.
+ // We want to avoid eagerly splitting unless out parameters are actually
+ // needed.
+ bool SplitNecessary = false;
+ for (auto &P : Summ.getArgEffects())
+ if (P.second.getKind() == RetainedOutParameterOnNonZero ||
+ P.second.getKind() == RetainedOutParameterOnZero)
+ SplitNecessary = true;
+
+ ProgramStateRef AssumeNonZeroReturn = State;
+ ProgramStateRef AssumeZeroReturn = State;
+
+ if (SplitNecessary) {
+ if (auto DL = L.getAs<DefinedOrUnknownSVal>()) {
+ AssumeNonZeroReturn = AssumeNonZeroReturn->assume(*DL, true);
+ AssumeZeroReturn = AssumeZeroReturn->assume(*DL, false);
+ }
+ }
+
+ for (unsigned idx = 0, e = CE.getNumArgs(); idx != e; ++idx) {
+ SVal ArgVal = CE.getArgSVal(idx);
+ ArgEffect AE = Summ.getArg(idx);
+
+ auto *ArgRegion = dyn_cast_or_null<TypedValueRegion>(ArgVal.getAsRegion());
+ if (!ArgRegion)
+ continue;
+
+ QualType PointeeTy = ArgRegion->getValueType();
+ SVal PointeeVal = State->getSVal(ArgRegion);
+ SymbolRef Pointee = PointeeVal.getAsLocSymbol();
+ if (!Pointee)
+ continue;
+
+ if (shouldEscapeRegion(ArgRegion))
+ continue;
+
+ auto makeNotOwnedParameter = [&](ProgramStateRef St) {
+ return setRefBinding(St, Pointee,
+ RefVal::makeNotOwned(AE.getObjKind(), PointeeTy));
+ };
+ auto makeOwnedParameter = [&](ProgramStateRef St) {
+ return setRefBinding(St, Pointee,
+ RefVal::makeOwned(ObjKind::OS, PointeeTy));
+ };
+
+ switch (AE.getKind()) {
+ case UnretainedOutParameter:
+ AssumeNonZeroReturn = makeNotOwnedParameter(AssumeNonZeroReturn);
+ AssumeZeroReturn = makeNotOwnedParameter(AssumeZeroReturn);
+ break;
+ case RetainedOutParameter:
+ AssumeNonZeroReturn = makeOwnedParameter(AssumeNonZeroReturn);
+ AssumeZeroReturn = makeOwnedParameter(AssumeZeroReturn);
+ break;
+ case RetainedOutParameterOnNonZero:
+ AssumeNonZeroReturn = makeOwnedParameter(AssumeNonZeroReturn);
+ break;
+ case RetainedOutParameterOnZero:
+ AssumeZeroReturn = makeOwnedParameter(AssumeZeroReturn);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (SplitNecessary) {
+ return {AssumeNonZeroReturn, AssumeZeroReturn};
+ } else {
+ assert(AssumeZeroReturn == AssumeNonZeroReturn);
+ return {AssumeZeroReturn};
+ }
+}
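+
+// Illustrative, assuming a parameter annotated along the lines of
+// os_returns_retained_on_zero:
+//   kern_return_t copyObj(OSObject **out __attribute__((os_returns_retained_on_zero)));
+// On the zero-return branch *out is modeled as owned (+1); on the non-zero
+// branch it is left untouched, which is why the state may be split above.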
+
+void RetainCountChecker::checkSummary(const RetainSummary &Summ,
+ const CallEvent &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ SourceRange ErrorRange;
+ SymbolRef ErrorSym = nullptr;
+
+ // Helper tag for providing diagnostics: indicate whether dealloc was sent
+ // at this location.
+ static CheckerProgramPointTag DeallocSentTag(this, DeallocTagDescription);
+ bool DeallocSent = false;
+
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+
+ ArgEffect Effect = Summ.getArg(idx);
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+
+ if (shouldEscapeOSArgumentOnCall(CallOrMsg, idx, T))
+ Effect = ArgEffect(StopTrackingHard, ObjKind::OS);
+
+ state = updateSymbol(state, Sym, *T, Effect, hasErr, C);
+ if (hasErr) {
+ ErrorRange = CallOrMsg.getArgSourceRange(idx);
+ ErrorSym = Sym;
+ break;
+ } else if (Effect.getKind() == Dealloc) {
+ DeallocSent = true;
+ }
+ }
+ }
+ }
+
+ // Evaluate the effect on the message receiver / `this` argument.
+ bool ReceiverIsTracked = false;
+ if (!hasErr) {
+ if (const auto *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg)) {
+ if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+ ReceiverIsTracked = true;
+ state = updateSymbol(state, Sym, *T,
+ Summ.getReceiverEffect(), hasErr, C);
+ if (hasErr) {
+ ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
+ ErrorSym = Sym;
+ } else if (Summ.getReceiverEffect().getKind() == Dealloc) {
+ DeallocSent = true;
+ }
+ }
+ }
+ } else if (const auto *MCall = dyn_cast<CXXMemberCall>(&CallOrMsg)) {
+ if (SymbolRef Sym = MCall->getCXXThisVal().getAsLocSymbol()) {
+ if (const RefVal *T = getRefBinding(state, Sym)) {
+ state = updateSymbol(state, Sym, *T, Summ.getThisEffect(),
+ hasErr, C);
+ if (hasErr) {
+ ErrorRange = MCall->getOriginExpr()->getSourceRange();
+ ErrorSym = Sym;
+ }
+ }
+ }
+ }
+ }
+
+ // Process any errors.
+ if (hasErr) {
+ processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
+ return;
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+ if (ReceiverIsTracked)
+ RE = getSummaryManager(C).getObjAllocRetEffect();
+ else
+ RE = RetEffect::MakeNoRet();
+ }
+
+ if (SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol()) {
+ QualType ResultTy = CallOrMsg.getResultType();
+ if (RE.notOwned()) {
+ const Expr *Ex = CallOrMsg.getOriginExpr();
+ assert(Ex);
+ ResultTy = GetReturnType(Ex, C.getASTContext());
+ }
+ if (Optional<RefVal> updatedRefVal = refValFromRetEffect(RE, ResultTy))
+ state = setRefBinding(state, Sym, *updatedRefVal);
+ }
+
+ SmallVector<ProgramStateRef, 2> Out =
+ updateOutParameters(state, Summ, CallOrMsg);
+
+ for (ProgramStateRef St : Out) {
+ if (DeallocSent) {
+ C.addTransition(St, C.getPredecessor(), &DeallocSentTag);
+ } else {
+ C.addTransition(St);
+ }
+ }
+}
+
+ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state,
+ SymbolRef sym, RefVal V,
+ ArgEffect AE,
+ RefVal::Kind &hasErr,
+ CheckerContext &C) const {
+ bool IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
+ if (AE.getObjKind() == ObjKind::ObjC && IgnoreRetainMsg) {
+ switch (AE.getKind()) {
+ default:
+ break;
+ case IncRef:
+ AE = AE.withKind(DoNothing);
+ break;
+ case DecRef:
+ AE = AE.withKind(DoNothing);
+ break;
+ case DecRefAndStopTrackingHard:
+ AE = AE.withKind(StopTracking);
+ break;
+ }
+ }
+
+ // Handle all use-after-releases.
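+  // For example: CFRelease(ref); CFRetain(ref); -- the second call arrives
+  // with 'ref' already in the Released state and is reported here.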
+ if (V.getKind() == RefVal::Released) {
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ return setRefBinding(state, sym, V);
+ }
+
+ switch (AE.getKind()) {
+ case UnretainedOutParameter:
+ case RetainedOutParameter:
+ case RetainedOutParameterOnZero:
+ case RetainedOutParameterOnNonZero:
+ llvm_unreachable("Applies to pointer-to-pointer parameters, which should "
+ "not have ref state.");
+
+ case Dealloc: // NB. we only need to add a note in a non-error case.
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
+ case RefVal::Owned:
+ // The object immediately transitions to the released state.
+ V = V ^ RefVal::Released;
+ V.clearCounts();
+ return setRefBinding(state, sym, V);
+ case RefVal::NotOwned:
+ V = V ^ RefVal::ErrorDeallocNotOwned;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+
+ case MayEscape:
+ if (V.getKind() == RefVal::Owned) {
+ V = V ^ RefVal::NotOwned;
+ break;
+ }
+
+ LLVM_FALLTHROUGH;
+
+ case DoNothing:
+ return state;
+
+ case Autorelease:
+ // Update the autorelease counts.
+ V = V.autorelease();
+ break;
+
+ case StopTracking:
+ case StopTrackingHard:
+ return removeRefBinding(state, sym);
+
+ case IncRef:
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for a retain.");
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ V = V + 1;
+ break;
+ }
+ break;
+
+ case DecRef:
+ case DecRefBridgedTransferred:
+ case DecRefAndStopTrackingHard:
+ switch (V.getKind()) {
+ default:
+ // case 'RefVal::Released' handled above.
+ llvm_unreachable("Invalid RefVal state for a release.");
+
+ case RefVal::Owned:
+ assert(V.getCount() > 0);
+ if (V.getCount() == 1) {
+ if (AE.getKind() == DecRefBridgedTransferred ||
+ V.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly)
+ V = V ^ RefVal::NotOwned;
+ else
+ V = V ^ RefVal::Released;
+ } else if (AE.getKind() == DecRefAndStopTrackingHard) {
+ return removeRefBinding(state, sym);
+ }
+
+ V = V - 1;
+ break;
+
+ case RefVal::NotOwned:
+ if (V.getCount() > 0) {
+ if (AE.getKind() == DecRefAndStopTrackingHard)
+ return removeRefBinding(state, sym);
+ V = V - 1;
+ } else if (V.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly) {
+        // Assume that the instance variable was holding on to the object at
+        // +1, and we just didn't know.
+ if (AE.getKind() == DecRefAndStopTrackingHard)
+ return removeRefBinding(state, sym);
+ V = V.releaseViaIvar() ^ RefVal::Released;
+ } else {
+ V = V ^ RefVal::ErrorReleaseNotOwned;
+ hasErr = V.getKind();
+ }
+ break;
+ }
+ break;
+ }
+ return setRefBinding(state, sym, V);
+}
+
+void RetainCountChecker::processNonLeakError(ProgramStateRef St,
+ SourceRange ErrorRange,
+ RefVal::Kind ErrorKind,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (const RefVal *RV = getRefBinding(St, Sym))
+ if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return;
+
+ ExplodedNode *N = C.generateErrorNode(St);
+ if (!N)
+ return;
+
+ RefCountBug *BT;
+ switch (ErrorKind) {
+ default:
+ llvm_unreachable("Unhandled error.");
+ case RefVal::ErrorUseAfterRelease:
+ if (!useAfterRelease)
+ useAfterRelease.reset(new UseAfterRelease(this));
+ BT = useAfterRelease.get();
+ break;
+ case RefVal::ErrorReleaseNotOwned:
+ if (!releaseNotOwned)
+ releaseNotOwned.reset(new BadRelease(this));
+ BT = releaseNotOwned.get();
+ break;
+ case RefVal::ErrorDeallocNotOwned:
+ if (!deallocNotOwned)
+ deallocNotOwned.reset(new DeallocNotOwned(this));
+ BT = deallocNotOwned.get();
+ break;
+ }
+
+ assert(BT);
+ auto report = llvm::make_unique<RefCountReport>(
+ *BT, C.getASTContext().getLangOpts(), N, Sym);
+ report->addRange(ErrorRange);
+ C.emitReport(std::move(report));
+}
+
+//===----------------------------------------------------------------------===//
+// Handle the return values of retain-count-related functions.
+//===----------------------------------------------------------------------===//
+
+bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ // Get the callee. We're only interested in simple C functions.
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ RetainSummaryManager &SmrMgr = getSummaryManager(C);
+ QualType ResultTy = CE->getCallReturnType(C.getASTContext());
+
+ // See if the function has 'rc_ownership_trusted_implementation'
+ // annotate attribute. If it does, we will not inline it.
+ bool hasTrustedImplementationAnnotation = false;
+
+ const LocationContext *LCtx = C.getLocationContext();
+
+ using BehaviorSummary = RetainSummaryManager::BehaviorSummary;
+ Optional<BehaviorSummary> BSmr =
+ SmrMgr.canEval(CE, FD, hasTrustedImplementationAnnotation);
+
+ // See if it's one of the specific functions we know how to eval.
+ if (!BSmr)
+ return false;
+
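+  // Identity-like functions (e.g. CFRetain, CFAutorelease) simply return
+  // their first argument; IdentityOrZero additionally allows a null result,
+  // modeling safe casts that may fail.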
+ // Bind the return value.
+ if (BSmr == BehaviorSummary::Identity ||
+ BSmr == BehaviorSummary::IdentityOrZero) {
+ SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+
+ // If the receiver is unknown or the function has
+ // 'rc_ownership_trusted_implementation' annotate attribute, conjure a
+ // return value.
+ if (RetVal.isUnknown() ||
+ (hasTrustedImplementationAnnotation && !ResultTy.isNull())) {
+ SValBuilder &SVB = C.getSValBuilder();
+ RetVal =
+ SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
+ }
+ state = state->BindExpr(CE, LCtx, RetVal, /*Invalidate=*/false);
+
+ if (BSmr == BehaviorSummary::IdentityOrZero) {
+ // Add a branch where the output is zero.
+ ProgramStateRef NullOutputState = C.getState();
+
+ // Assume that output is zero on the other branch.
+ NullOutputState = NullOutputState->BindExpr(
+ CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
+
+ C.addTransition(NullOutputState);
+
+ // And on the original branch assume that both input and
+ // output are non-zero.
+ if (auto L = RetVal.getAs<DefinedOrUnknownSVal>())
+ state = state->assume(*L, /*Assumption=*/true);
+
+ }
+ }
+
+ C.addTransition(state);
+ return true;
+}
+
+ExplodedNode * RetainCountChecker::processReturn(const ReturnStmt *S,
+ CheckerContext &C) const {
+ ExplodedNode *Pred = C.getPredecessor();
+
+ // Only adjust the reference count if this is the top-level call frame,
+ // and not the result of inlining. In the future, we should do
+ // better checking even for inlined calls, and see if they match
+ // with their expected semantics (e.g., the method should return a retained
+ // object, etc.).
+ if (!C.inTopFrame())
+ return Pred;
+
+ if (!S)
+ return Pred;
+
+ const Expr *RetE = S->getRetValue();
+ if (!RetE)
+ return Pred;
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym =
+ state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
+ if (!Sym)
+ return Pred;
+
+ // Get the reference count binding (if any).
+ const RefVal *T = getRefBinding(state, Sym);
+ if (!T)
+ return Pred;
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
+ unsigned cnt = X.getCount();
+ assert(cnt > 0);
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ break;
+ }
+
+ case RefVal::NotOwned: {
+ unsigned cnt = X.getCount();
+ if (cnt) {
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ } else {
+ X = X ^ RefVal::ReturnedNotOwned;
+ }
+ break;
+ }
+
+ default:
+ return Pred;
+ }
+
+ // Update the binding.
+ state = setRefBinding(state, Sym, X);
+ Pred = C.addTransition(state);
+
+ // At this point we have updated the state properly.
+ // Everything after this is merely checking to see if the return value has
+ // been over- or under-retained.
+
+ // Did we cache out?
+ if (!Pred)
+ return nullptr;
+
+ // Update the autorelease counts.
+ static CheckerProgramPointTag AutoreleaseTag(this, "Autorelease");
+ state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X, S);
+
+ // Have we generated a sink node?
+ if (!state)
+ return nullptr;
+
+ // Get the updated binding.
+ T = getRefBinding(state, Sym);
+ assert(T);
+ X = *T;
+
+ // Consult the summary of the enclosing method.
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const Decl *CD = &Pred->getCodeDecl();
+ RetEffect RE = RetEffect::MakeNoRet();
+
+ // FIXME: What is the convention for blocks? Is there one?
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary *Summ = Summaries.getMethodSummary(MD);
+ RE = Summ->getRetEffect();
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
+ if (!isa<CXXMethodDecl>(FD)) {
+ const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
+ RE = Summ->getRetEffect();
+ }
+ }
+
+ return checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
+}
+
+ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
+ CheckerContext &C,
+ ExplodedNode *Pred,
+ RetEffect RE, RefVal X,
+ SymbolRef Sym,
+ ProgramStateRef state) const {
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (X.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return Pred;
+
+ // Any leaks or other errors?
+ if (X.isReturnedOwned() && X.getCount() == 0) {
+ if (RE.getKind() != RetEffect::NoRet) {
+ if (!RE.isOwned()) {
+
+        // The value is returned as owned, but the enclosing function is not
+        // expected to return ownership, so report the returned reference as
+        // leaked.
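+        // For example, a function named `getValue` (no Create/Copy in its
+        // name) that returns the result of CFStringCreateCopy leaks the +1
+        // reference it received.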
+ X = X ^ RefVal::ErrorLeakReturned;
+
+ // Generate an error node.
+ state = setRefBinding(state, Sym, X);
+
+ static CheckerProgramPointTag ReturnOwnLeakTag(this, "ReturnsOwnLeak");
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
+ if (N) {
+ const LangOptions &LOpts = C.getASTContext().getLangOpts();
+ auto R = llvm::make_unique<RefLeakReport>(
+ *getLeakAtReturnBug(LOpts), LOpts, N, Sym, C);
+ C.emitReport(std::move(R));
+ }
+ return N;
+ }
+ }
+ } else if (X.isReturnedNotOwned()) {
+ if (RE.isOwned()) {
+ if (X.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::AccessedDirectly) {
+ // Assume the method was trying to transfer a +1 reference from a
+ // strong ivar to the caller.
+ state = setRefBinding(state, Sym,
+ X.releaseViaIvar() ^ RefVal::ReturnedOwned);
+ } else {
+ // Trying to return a not owned object to a caller expecting an
+ // owned object.
+ state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
+
+ static CheckerProgramPointTag
+ ReturnNotOwnedTag(this, "ReturnNotOwnedForOwned");
+
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
+ if (N) {
+ if (!returnNotOwnedForOwned)
+ returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
+
+ auto R = llvm::make_unique<RefCountReport>(
+ *returnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym);
+ C.emitReport(std::move(R));
+ }
+ return N;
+ }
+ }
+ }
+ return Pred;
+}
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+
+  // A value escapes in two possible cases (this may change):
+  //
+  // (1) we are binding to something that is not a memory region.
+  // (2) we are binding to a memory region that does not have stack storage.
+ ProgramStateRef state = C.getState();
+
+ if (auto regionLoc = loc.getAs<loc::MemRegionVal>()) {
+ escapes = shouldEscapeRegion(regionLoc->getRegion());
+ }
+
+  // If we are storing the value into an automatic (function-scope) variable
+  // annotated with __attribute__((cleanup(...))), stop tracking the value to
+  // avoid leak false positives.
+ if (const auto *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
+ const VarDecl *VD = LVR->getDecl();
+ if (VD->hasAttr<CleanupAttr>()) {
+ escapes = true;
+ }
+ }
+
+  // If our store can represent the binding and we are storing to something
+  // with local storage, just return and let the simulation state continue
+  // as-is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
+
+ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+ // FIXME: We may add to the interface of evalAssume the list of symbols
+ // whose assumptions have changed. For now we just iterate through the
+ // bindings and check if any of the tracked symbols are NULL. This isn't
+ // too bad since the number of symbols we will track in practice are
+ // probably small and evalAssume is only called at branches and a few
+ // other places.
+ RefBindingsTy B = state->get<RefBindings>();
+
+ if (B.isEmpty())
+ return state;
+
+ bool changed = false;
+ RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    // If the symbol is constrained to be null, stop tracking it.
+ ConstraintManager &CMgr = state->getConstraintManager();
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ if (AllocFailed.isConstrainedTrue()) {
+ changed = true;
+ B = RefBFactory.remove(B, I.getKey());
+ }
+ }
+
+ if (changed)
+ state = state->set<RefBindings>(B);
+
+ return state;
+}
+
+ProgramStateRef
+RetainCountChecker::checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx,
+ const CallEvent *Call) const {
+ if (!invalidated)
+ return state;
+
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(SR->getSymbol());
+ }
+
+ for (SymbolRef sym :
+ llvm::make_range(invalidated->begin(), invalidated->end())) {
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // Remove any existing reference-count binding.
+ state = removeRefBinding(state, sym);
+ }
+ return state;
+}
+
+ProgramStateRef
+RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag,
+ CheckerContext &Ctx,
+ SymbolRef Sym,
+ RefVal V,
+ const ReturnStmt *S) const {
+ unsigned ACnt = V.getAutoreleaseCount();
+
+ // No autorelease counts? Nothing to be done.
+ if (!ACnt)
+ return state;
+
+ unsigned Cnt = V.getCount();
+
+  // FIXME: Handle sending 'autorelease' to an already-released object.
+
+ if (V.getKind() == RefVal::ReturnedOwned)
+ ++Cnt;
+
+ // If we would over-release here, but we know the value came from an ivar,
+ // assume it was a strong ivar that's just been relinquished.
+ if (ACnt > Cnt &&
+ V.getIvarAccessHistory() == RefVal::IvarAccessHistory::AccessedDirectly) {
+ V = V.releaseViaIvar();
+ --ACnt;
+ }
+
+ if (ACnt <= Cnt) {
+ if (ACnt == Cnt) {
+ V.clearCounts();
+ if (V.getKind() == RefVal::ReturnedOwned) {
+ V = V ^ RefVal::ReturnedNotOwned;
+ } else {
+ V = V ^ RefVal::NotOwned;
+ }
+ } else {
+ V.setCount(V.getCount() - ACnt);
+ V.setAutoreleaseCount(0);
+ }
+ return setRefBinding(state, Sym, V);
+ }
+
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ return state;
+
+  // Whoa! More autorelease counts than retain counts left.
+  // Emit a hard error.
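+  // For example, calling CFAutorelease() on a +0 value obtained under the
+  // Get rule ends up here.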
+ V = V ^ RefVal::ErrorOverAutorelease;
+ state = setRefBinding(state, Sym, V);
+
+ ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
+ if (N) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Object was autoreleased ";
+ if (V.getAutoreleaseCount() > 1)
+ os << V.getAutoreleaseCount() << " times but the object ";
+ else
+ os << "but ";
+ os << "has a +" << V.getCount() << " retain count";
+
+ if (!overAutorelease)
+ overAutorelease.reset(new OverAutorelease(this));
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ auto R = llvm::make_unique<RefCountReport>(*overAutorelease, LOpts, N, Sym,
+ os.str());
+ Ctx.emitReport(std::move(R));
+ }
+
+ return nullptr;
+}
+
+ProgramStateRef
+RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const {
+ bool hasLeak;
+
+ // HACK: Ignore retain-count issues on values accessed through ivars,
+ // because of cases like this:
+ // [_contentView retain];
+ // [_contentView removeFromSuperview];
+ // [self addSubview:_contentView]; // invalidates 'self'
+ // [_contentView release];
+ if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
+ hasLeak = false;
+ else if (V.isOwned())
+ hasLeak = true;
+ else if (V.isNotOwned() || V.isReturnedOwned())
+ hasLeak = (V.getCount() > 0);
+ else
+ hasLeak = false;
+
+ if (!hasLeak)
+ return removeRefBinding(state, sid);
+
+ Leaked.push_back(sid);
+ return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode *
+RetainCountChecker::processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred) const {
+ // Generate an intermediate node representing the leak point.
+ ExplodedNode *N = Ctx.addTransition(state, Pred);
+
+ if (N) {
+ for (SmallVectorImpl<SymbolRef>::iterator
+ I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ RefCountBug *BT = Pred ? getLeakWithinFunctionBug(LOpts)
+ : getLeakAtReturnBug(LOpts);
+ assert(BT && "BugType not initialized.");
+
+ Ctx.emitReport(
+ llvm::make_unique<RefLeakReport>(*BT, LOpts, N, *I, Ctx));
+ }
+ }
+
+ return N;
+}
+
+static bool isISLObjectRef(QualType Ty) {
+ return StringRef(Ty.getAsString()).startswith("isl_");
+}
+
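+// Illustrative: for an `isl_set *` parameter whose callee-side summary marks
+// it as consumed (DecRef), tracking in the top frame starts at owned (+1);
+// other isl_* pointer parameters start as not owned (+0).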
+void RetainCountChecker::checkBeginFunction(CheckerContext &Ctx) const {
+ if (!Ctx.inTopFrame())
+ return;
+
+ RetainSummaryManager &SmrMgr = getSummaryManager(Ctx);
+ const LocationContext *LCtx = Ctx.getLocationContext();
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(LCtx->getDecl());
+
+ if (!FD || SmrMgr.isTrustedReferenceCountImplementation(FD))
+ return;
+
+ ProgramStateRef state = Ctx.getState();
+ const RetainSummary *FunctionSummary = SmrMgr.getFunctionSummary(FD);
+ ArgEffects CalleeSideArgEffects = FunctionSummary->getArgEffects();
+
+ for (unsigned idx = 0, e = FD->getNumParams(); idx != e; ++idx) {
+ const ParmVarDecl *Param = FD->getParamDecl(idx);
+ SymbolRef Sym = state->getSVal(state->getRegion(Param, LCtx)).getAsSymbol();
+
+ QualType Ty = Param->getType();
+ const ArgEffect *AE = CalleeSideArgEffects.lookup(idx);
+ if (AE && AE->getKind() == DecRef && isISLObjectRef(Ty)) {
+ state = setRefBinding(
+ state, Sym, RefVal::makeOwned(ObjKind::Generalized, Ty));
+ } else if (isISLObjectRef(Ty)) {
+ state = setRefBinding(
+ state, Sym,
+ RefVal::makeNotOwned(ObjKind::Generalized, Ty));
+ }
+ }
+
+ Ctx.addTransition(state);
+}
+
+void RetainCountChecker::checkEndFunction(const ReturnStmt *RS,
+ CheckerContext &Ctx) const {
+ ExplodedNode *Pred = processReturn(RS, Ctx);
+
+ // Created state cached out.
+ if (!Pred) {
+ return;
+ }
+
+ ProgramStateRef state = Pred->getState();
+ RefBindingsTy B = state->get<RefBindings>();
+
+ // Don't process anything within synthesized bodies.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized()) {
+ assert(!LCtx->inTopFrame());
+ return;
+ }
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ state = handleAutoreleaseCounts(state, Pred, /*Tag=*/nullptr, Ctx,
+ I->first, I->second);
+ if (!state)
+ return;
+ }
+
+ // If the current LocationContext has a parent, don't check for leaks.
+ // We will do that later.
+ // FIXME: we should instead check for imbalances of the retain/releases,
+ // and suggest annotations.
+ if (LCtx->getParent())
+ return;
+
+ B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ state = handleSymbolDeath(state, I->first, I->second, Leaked);
+
+ processLeaks(state, Leaked, Ctx, Pred);
+}
+
+void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ExplodedNode *Pred = C.getPredecessor();
+
+ ProgramStateRef state = C.getState();
+ RefBindingsTy B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ // Update counts from autorelease pools
+ for (const auto &I: state->get<RefBindings>()) {
+ SymbolRef Sym = I.first;
+ if (SymReaper.isDead(Sym)) {
+ static CheckerProgramPointTag Tag(this, "DeadSymbolAutorelease");
+ const RefVal &V = I.second;
+ state = handleAutoreleaseCounts(state, Pred, &Tag, C, Sym, V);
+ if (!state)
+ return;
+
+ // Fetch the new reference count from the state, and use it to handle
+ // this symbol.
+ state = handleSymbolDeath(state, Sym, *getRefBinding(state, Sym), Leaked);
+ }
+ }
+
+ if (Leaked.empty()) {
+ C.addTransition(state);
+ return;
+ }
+
+ Pred = processLeaks(state, Leaked, C, Pred);
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Now generate a new node that nukes the old bindings.
+ // The only bindings left at this point are the leaked symbols.
+ RefBindingsTy::Factory &F = state->get_context<RefBindings>();
+ B = state->get<RefBindings>();
+
+ for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
+ E = Leaked.end();
+ I != E; ++I)
+ B = F.remove(B, *I);
+
+ state = state->set<RefBindings>(B);
+ C.addTransition(state, Pred);
+}
+
+void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ RefBindingsTy B = State->get<RefBindings>();
+
+ if (B.isEmpty())
+ return;
+
+ Out << Sep << NL;
+
+ for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ Out << I->first << " : ";
+ I->second.print(Out);
+ Out << NL;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerRetainCountChecker(CheckerManager &Mgr) {
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ Chk->TrackObjCAndCFObjects = true;
+}
+
+// FIXME: remove this, hack for backwards compatibility:
+// it should be possible to enable the NS/CF retain count checker as
+// osx.cocoa.RetainCount, and it should be possible to disable
+// osx.OSObjectRetainCount using osx.cocoa.RetainCount:CheckOSObject=false.
+static bool hasPrevCheckOSObjectOptionDisabled(AnalyzerOptions &Options) {
+ auto I = Options.Config.find("osx.cocoa.RetainCount:CheckOSObject");
+ if (I != Options.Config.end())
+ return I->getValue() == "false";
+ return false;
+}
+
+void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) {
+ auto *Chk = Mgr.registerChecker<RetainCountChecker>();
+ if (!hasPrevCheckOSObjectOptionDisabled(Mgr.getAnalyzerOptions()))
+ Chk->TrackOSObjects = true;
+}
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
new file mode 100644
index 000000000000..31e2d9ae4932
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -0,0 +1,393 @@
+//==--- RetainCountChecker.h - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_H
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "RetainCountDiagnostics.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/SelectorExtras.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <cstdarg>
+#include <utility>
+
+namespace clang {
+namespace ento {
+namespace retaincountchecker {
+
+/// Metadata on reference.
+class RefVal {
+public:
+ enum Kind {
+ Owned = 0, // Owning reference.
+    NotOwned, // Reference is not owned but still valid (not freed).
+ Released, // Object has been released.
+ ReturnedOwned, // Returned object passes ownership to caller.
+    ReturnedNotOwned, // Returned object does not pass ownership to caller.
+ ERROR_START,
+ ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+ ErrorUseAfterRelease, // Object used after released.
+ ErrorReleaseNotOwned, // Release of an object that was not owned.
+ ERROR_LEAK_START,
+ ErrorLeak, // A memory leak due to excessive reference counts.
+    ErrorLeakReturned, // A memory leak due to the returning method not
+                       // following the ownership naming conventions.
+ ErrorOverAutorelease,
+ ErrorReturnedNotOwned
+ };
+
+ /// Tracks how an object referenced by an ivar has been used.
+ ///
+ /// This accounts for us not knowing if an arbitrary ivar is supposed to be
+ /// stored at +0 or +1.
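+  /// For example, if an object first seen through an ivar is later released,
+  /// the checker assumes the ivar held it at +1 instead of reporting an
+  /// over-release.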
+ enum class IvarAccessHistory {
+ None,
+ AccessedDirectly,
+ ReleasedAfterDirectAccess
+ };
+
+private:
+ /// The number of outstanding retains.
+ unsigned Cnt;
+ /// The number of outstanding autoreleases.
+ unsigned ACnt;
+ /// The (static) type of the object at the time we started tracking it.
+ QualType T;
+
+ /// The current state of the object.
+ ///
+ /// See the RefVal::Kind enum for possible values.
+ unsigned RawKind : 5;
+
+ /// The kind of object being tracked (CF or ObjC or OSObject), if known.
+ ///
+ /// See the ObjKind enum for possible values.
+ unsigned RawObjectKind : 3;
+
+ /// True if the current state and/or retain count may turn out to not be the
+ /// best possible approximation of the reference counting state.
+ ///
+ /// If true, the checker may decide to throw away ("override") this state
+ /// in favor of something else when it sees the object being used in new ways.
+ ///
+ /// This setting should not be propagated to state derived from this state.
+ /// Once we start deriving new states, it would be inconsistent to override
+ /// them.
+ unsigned RawIvarAccessHistory : 2;
+
+ RefVal(Kind k, ObjKind o, unsigned cnt, unsigned acnt, QualType t,
+ IvarAccessHistory IvarAccess)
+ : Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)),
+ RawObjectKind(static_cast<unsigned>(o)),
+ RawIvarAccessHistory(static_cast<unsigned>(IvarAccess)) {
+ assert(getKind() == k && "not enough bits for the kind");
+ assert(getObjKind() == o && "not enough bits for the object kind");
+ assert(getIvarAccessHistory() == IvarAccess && "not enough bits");
+ }
+
+public:
+ Kind getKind() const { return static_cast<Kind>(RawKind); }
+
+ ObjKind getObjKind() const {
+ return static_cast<ObjKind>(RawObjectKind);
+ }
+
+ unsigned getCount() const { return Cnt; }
+ unsigned getAutoreleaseCount() const { return ACnt; }
+ unsigned getCombinedCounts() const { return Cnt + ACnt; }
+ void clearCounts() {
+ Cnt = 0;
+ ACnt = 0;
+ }
+ void setCount(unsigned i) {
+ Cnt = i;
+ }
+ void setAutoreleaseCount(unsigned i) {
+ ACnt = i;
+ }
+
+ QualType getType() const { return T; }
+
+ /// Returns what the analyzer knows about direct accesses to a particular
+ /// instance variable.
+ ///
+ /// If the object with this refcount wasn't originally from an Objective-C
+ /// ivar region, this should always return IvarAccessHistory::None.
+ IvarAccessHistory getIvarAccessHistory() const {
+ return static_cast<IvarAccessHistory>(RawIvarAccessHistory);
+ }
+
+ bool isOwned() const {
+ return getKind() == Owned;
+ }
+
+ bool isNotOwned() const {
+ return getKind() == NotOwned;
+ }
+
+ bool isReturnedOwned() const {
+ return getKind() == ReturnedOwned;
+ }
+
+ bool isReturnedNotOwned() const {
+ return getKind() == ReturnedNotOwned;
+ }
+
+ /// Create a state for an object whose lifetime is the responsibility of the
+ /// current function, at least partially.
+ ///
+ /// Most commonly, this is an owned object with a retain count of +1.
+ static RefVal makeOwned(ObjKind o, QualType t) {
+ return RefVal(Owned, o, /*Count=*/1, 0, t, IvarAccessHistory::None);
+ }
+
+ /// Create a state for an object whose lifetime is not the responsibility of
+ /// the current function.
+ ///
+ /// Most commonly, this is an unowned object with a retain count of +0.
+ static RefVal makeNotOwned(ObjKind o, QualType t) {
+ return RefVal(NotOwned, o, /*Count=*/0, 0, t, IvarAccessHistory::None);
+ }
+
+ RefVal operator-(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() - i,
+ getAutoreleaseCount(), getType(), getIvarAccessHistory());
+ }
+
+ RefVal operator+(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() + i,
+ getAutoreleaseCount(), getType(), getIvarAccessHistory());
+ }
+
+ RefVal operator^(Kind k) const {
+ return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), getIvarAccessHistory());
+ }
+
+ RefVal autorelease() const {
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+ getType(), getIvarAccessHistory());
+ }
+
+ RefVal withIvarAccess() const {
+ assert(getIvarAccessHistory() == IvarAccessHistory::None);
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), IvarAccessHistory::AccessedDirectly);
+ }
+
+ RefVal releaseViaIvar() const {
+ assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly);
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
+ getType(), IvarAccessHistory::ReleasedAfterDirectAccess);
+ }
+
+ // Comparison, profiling, and pretty-printing.
+ bool hasSameState(const RefVal &X) const {
+ return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt &&
+ getIvarAccessHistory() == X.getIvarAccessHistory();
+ }
+
+ bool operator==(const RefVal& X) const {
+ return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind();
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.Add(T);
+ ID.AddInteger(RawKind);
+ ID.AddInteger(Cnt);
+ ID.AddInteger(ACnt);
+ ID.AddInteger(RawObjectKind);
+ ID.AddInteger(RawIvarAccessHistory);
+ }
+
+ void print(raw_ostream &Out) const;
+};
+
+class RetainCountChecker
+ : public Checker< check::Bind,
+ check::DeadSymbols,
+ check::BeginFunction,
+ check::EndFunction,
+ check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>,
+ check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCBoxedExpr>,
+ check::PostStmt<ObjCIvarRefExpr>,
+ check::PostCall,
+ check::RegionChanges,
+ eval::Assume,
+ eval::Call > {
+ mutable std::unique_ptr<RefCountBug> useAfterRelease, releaseNotOwned;
+ mutable std::unique_ptr<RefCountBug> deallocNotOwned;
+ mutable std::unique_ptr<RefCountBug> overAutorelease, returnNotOwnedForOwned;
+ mutable std::unique_ptr<RefCountBug> leakWithinFunction, leakAtReturn;
+
+ mutable std::unique_ptr<RetainSummaryManager> Summaries;
+public:
+ static constexpr const char *DeallocTagDescription = "DeallocSent";
+
+ /// Track Objective-C and CoreFoundation objects.
+ bool TrackObjCAndCFObjects = false;
+
+  /// Track subclasses of OSObject.
+ bool TrackOSObjects = false;
+
+ RetainCountChecker() {}
+
+ RefCountBug *getLeakWithinFunctionBug(const LangOptions &LOpts) const;
+
+ RefCountBug *getLeakAtReturnBug(const LangOptions &LOpts) const;
+
+ RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const {
+ // FIXME: We don't support ARC being turned on and off during one analysis.
+ // (nor, for that matter, do we support changing ASTContexts)
+ bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
+ if (!Summaries) {
+ Summaries.reset(new RetainSummaryManager(
+ Ctx, ARCEnabled, TrackObjCAndCFObjects, TrackOSObjects));
+ } else {
+ assert(Summaries->isARCEnabled() == ARCEnabled);
+ }
+ return *Summaries;
+ }
+
+ RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
+ return getSummaryManager(C.getASTContext());
+ }
+
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const override;
+
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+ void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
+
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
+
+ void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void processSummaryOfInlined(const RetainSummary &Summ,
+ const CallEvent &Call,
+ CheckerContext &C) const;
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+ bool Assumption) const;
+
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext* LCtx,
+ const CallEvent *Call) const;
+
+ ExplodedNode* checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
+ ExplodedNode *Pred, RetEffect RE, RefVal X,
+ SymbolRef Sym, ProgramStateRef state) const;
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkBeginFunction(CheckerContext &C) const;
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+
+ ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const;
+
+ void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
+ RefVal::Kind ErrorKind, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
+
+ ProgramStateRef handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const;
+
+ ProgramStateRef
+ handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
+ const ProgramPointTag *Tag, CheckerContext &Ctx,
+ SymbolRef Sym,
+ RefVal V,
+ const ReturnStmt *S=nullptr) const;
+
+ ExplodedNode *processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred = nullptr) const;
+
+private:
+ /// Perform the necessary checks and state adjustments at the end of the
+ /// function.
+ /// \p S Return statement, may be null.
+ ExplodedNode * processReturn(const ReturnStmt *S, CheckerContext &C) const;
+};
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+const RefVal *getRefBinding(ProgramStateRef State, SymbolRef Sym);
+
+ProgramStateRef setRefBinding(ProgramStateRef State, SymbolRef Sym,
+ RefVal Val);
+
+ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym);
+
+/// Returns true if this stack frame is for an Objective-C method that is a
+/// property getter or setter whose body has been synthesized by the analyzer.
+inline bool isSynthesizedAccessor(const StackFrameContext *SFC) {
+ auto Method = dyn_cast_or_null<ObjCMethodDecl>(SFC->getDecl());
+ if (!Method || !Method->isPropertyAccessor())
+ return false;
+
+ return SFC->getAnalysisDeclContext()->isBodyAutosynthesized();
+}
+
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
new file mode 100644
index 000000000000..cda1a928de13
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -0,0 +1,794 @@
+// RetainCountDiagnostics.cpp - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines diagnostics for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "RetainCountDiagnostics.h"
+#include "RetainCountChecker.h"
+
+using namespace clang;
+using namespace ento;
+using namespace retaincountchecker;
+
+static bool isNumericLiteralExpression(const Expr *E) {
+ // FIXME: This set of cases was copied from SemaExprObjC.
+ return isa<IntegerLiteral>(E) ||
+ isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E) ||
+ isa<ObjCBoolLiteralExpr>(E) ||
+ isa<CXXBoolLiteralExpr>(E);
+}
+
+/// If type represents a pointer to CXXRecordDecl,
+/// and is not a typedef, return the decl name.
+/// Otherwise, return the serialization of type.
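+/// For example, `OSArray *` is printed as "OSArray", while a typedef'd
+/// pointer type is printed via its full serialization.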
+static std::string getPrettyTypeName(QualType QT) {
+ QualType PT = QT->getPointeeType();
+ if (!PT.isNull() && !QT->getAs<TypedefType>())
+ if (const auto *RD = PT->getAsCXXRecordDecl())
+ return RD->getName();
+ return QT.getAsString();
+}
+
+/// Write information about the type state change to {@code os},
+/// return whether the note should be generated.
+static bool shouldGenerateNote(llvm::raw_string_ostream &os,
+ const RefVal *PrevT, const RefVal &CurrV,
+ bool DeallocSent) {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (DeallocSent) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ return true;
+ }
+ }
+
+ // Determine if the typestate has changed.
+ if (!PrevV.hasSameState(CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return false;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object autoreleased";
+ return true;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ return true;
+
+ case RefVal::Released:
+ if (CurrV.getIvarAccessHistory() ==
+ RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
+ CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
+ os << "Strong instance variable relinquished. ";
+ }
+ os << "Object released.";
+ return true;
+
+ case RefVal::ReturnedOwned:
+ // Autoreleases can be applied after marking a node ReturnedOwned.
+ if (CurrV.getAutoreleaseCount())
+ return false;
+
+ os << "Object returned to caller as an owning reference (single "
+ "retain count transferred to caller)";
+ return true;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 retain count";
+ return true;
+
+ default:
+ return false;
+ }
+ return true;
+}
+
+/// Finds the argument index of the out parameter in the call {@code S}
+/// corresponding to the symbol {@code Sym}.
+/// If none found, returns None.
+static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
+ const LocationContext *LCtx,
+ SymbolRef &Sym,
+ Optional<CallEventRef<>> CE) {
+ if (!CE)
+ return None;
+
+ for (unsigned Idx = 0; Idx < (*CE)->getNumArgs(); Idx++)
+ if (const MemRegion *MR = (*CE)->getArgSVal(Idx).getAsRegion())
+ if (const auto *TR = dyn_cast<TypedValueRegion>(MR))
+ if (CurrSt->getSVal(MR, TR->getValueType()).getAsSymExpr() == Sym)
+ return Idx;
+
+ return None;
+}
+
+static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
+ const LocationContext *LCtx,
+ const RefVal &CurrV, SymbolRef &Sym,
+ const Stmt *S,
+ llvm::raw_string_ostream &os) {
+ CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available)
+ // from the tracked SVal.
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+ const FunctionDecl *FD = X.getAsFunctionDecl();
+
+ // If failed, try to get it from AST.
+ if (!FD)
+ FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(CE->getCalleeDecl())) {
+ os << "Call to method '" << MD->getQualifiedNameAsString() << '\'';
+ } else if (FD) {
+ os << "Call to function '" << FD->getQualifiedNameAsString() << '\'';
+ } else {
+ os << "function call";
+ }
+ } else if (isa<CXXNewExpr>(S)) {
+ os << "Operator 'new'";
+ } else {
+ assert(isa<ObjCMessageExpr>(S));
+ CallEventRef<ObjCMethodCall> Call =
+ Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+
+ switch (Call->getMessageKind()) {
+ case OCM_Message:
+ os << "Method";
+ break;
+ case OCM_PropertyAccess:
+ os << "Property";
+ break;
+ case OCM_Subscript:
+ os << "Subscript";
+ break;
+ }
+ }
+
+ Optional<CallEventRef<>> CE = Mgr.getCall(S, CurrSt, LCtx);
+ auto Idx = findArgIdxOfSymbol(CurrSt, LCtx, Sym, CE);
+
+ // If the index is not found, we assume that the symbol was returned.
+ if (!Idx) {
+ os << " returns ";
+ } else {
+ os << " writes ";
+ }
+
+ if (CurrV.getObjKind() == ObjKind::CF) {
+ os << "a Core Foundation object of type '"
+ << Sym->getType().getAsString() << "' with a ";
+ } else if (CurrV.getObjKind() == ObjKind::OS) {
+ os << "an OSObject of type '" << getPrettyTypeName(Sym->getType())
+ << "' with a ";
+ } else if (CurrV.getObjKind() == ObjKind::Generalized) {
+ os << "an object of type '" << Sym->getType().getAsString()
+ << "' with a ";
+ } else {
+ assert(CurrV.getObjKind() == ObjKind::ObjC);
+ QualType T = Sym->getType();
+ if (!isa<ObjCObjectPointerType>(T)) {
+ os << "an Objective-C object with a ";
+ } else {
+ const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
+ os << "an instance of " << PT->getPointeeType().getAsString()
+ << " with a ";
+ }
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count";
+ } else {
+ assert(CurrV.isNotOwned());
+ os << "+0 retain count";
+ }
+
+ if (Idx) {
+ os << " into an out parameter '";
+ const ParmVarDecl *PVD = (*CE)->parameters()[*Idx];
+ PVD->getNameForDiagnostic(os, PVD->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/false);
+ os << "'";
+
+ QualType RT = (*CE)->getResultType();
+ if (!RT.isNull() && !RT->isVoidType()) {
+ SVal RV = (*CE)->getReturnValue();
+ if (CurrSt->isNull(RV).isConstrainedTrue()) {
+ os << " (assuming the call returns zero)";
+ } else if (CurrSt->isNonNull(RV).isConstrainedTrue()) {
+ os << " (assuming the call returns non-zero)";
+ }
+
+ }
+ }
+}
+
+namespace clang {
+namespace ento {
+namespace retaincountchecker {
+
+class RefCountReportVisitor : public BugReporterVisitor {
+protected:
+ SymbolRef Sym;
+
+public:
+ RefCountReportVisitor(SymbolRef sym) : Sym(sym) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int x = 0;
+ ID.AddPointer(&x);
+ ID.AddPointer(Sym);
+ }
+
+ std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+};
+
+class RefLeakReportVisitor : public RefCountReportVisitor {
+public:
+ RefLeakReportVisitor(SymbolRef sym) : RefCountReportVisitor(sym) {}
+
+ std::shared_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR) override;
+};
+
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+
+/// Find the first node with the parent stack frame.
+static const ExplodedNode *getCalleeNode(const ExplodedNode *Pred) {
+ const StackFrameContext *SC = Pred->getStackFrame();
+ if (SC->inTopFrame())
+ return nullptr;
+ const StackFrameContext *PC = SC->getParent()->getStackFrame();
+ if (!PC)
+ return nullptr;
+
+ const ExplodedNode *N = Pred;
+ while (N && N->getStackFrame() != PC) {
+ N = N->getFirstPred();
+ }
+ return N;
+}
+
+
+/// Insert a diagnostic piece at function exit
+/// if a function parameter is annotated as "os_consumed",
+/// but it does not actually consume the reference.
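+///
+/// Illustrative example (hypothetical code, not part of this patch):
+///
+///   void sink(OSObject *o __attribute__((os_consumed)));
+///
+/// If the inlined body of 'sink' never releases 'o', the reference count at
+/// call exit matches the count before the call, and the note
+/// "Parameter 'o' is marked as consuming, but the function did not consume
+/// the reference" is emitted.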
+static std::shared_ptr<PathDiagnosticEventPiece>
+annotateConsumedSummaryMismatch(const ExplodedNode *N,
+ CallExitBegin &CallExitLoc,
+ const SourceManager &SM,
+ CallEventManager &CEMgr) {
+
+ const ExplodedNode *CN = getCalleeNode(N);
+ if (!CN)
+ return nullptr;
+
+ CallEventRef<> Call = CEMgr.getCaller(N->getStackFrame(), N->getState());
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ ArrayRef<const ParmVarDecl *> Parameters = Call->parameters();
+ for (unsigned I=0; I < Call->getNumArgs() && I < Parameters.size(); ++I) {
+ const ParmVarDecl *PVD = Parameters[I];
+
+ if (!PVD->hasAttr<OSConsumedAttr>())
+ continue;
+
+ if (SymbolRef SR = Call->getArgSVal(I).getAsLocSymbol()) {
+ const RefVal *CountBeforeCall = getRefBinding(CN->getState(), SR);
+ const RefVal *CountAtExit = getRefBinding(N->getState(), SR);
+
+ if (!CountBeforeCall || !CountAtExit)
+ continue;
+
+ unsigned CountBefore = CountBeforeCall->getCount();
+ unsigned CountAfter = CountAtExit->getCount();
+
+ bool AsExpected = CountBefore > 0 && CountAfter == CountBefore - 1;
+ if (!AsExpected) {
+ os << "Parameter '";
+ PVD->getNameForDiagnostic(os, PVD->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/false);
+ os << "' is marked as consuming, but the function did not consume "
+ << "the reference\n";
+ }
+ }
+ }
+
+ if (os.str().empty())
+ return nullptr;
+
+ // FIXME: remove the code duplication with NoStoreFuncVisitor.
+ PathDiagnosticLocation L;
+ if (const ReturnStmt *RS = CallExitLoc.getReturnStmt()) {
+ L = PathDiagnosticLocation::createBegin(RS, SM, N->getLocationContext());
+ } else {
+ L = PathDiagnosticLocation(
+ Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(), SM);
+ }
+
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+RefCountReportVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC, BugReport &BR) {
+
+ const SourceManager &SM = BRC.getSourceManager();
+ CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
+ if (auto CE = N->getLocationAs<CallExitBegin>())
+ if (auto PD = annotateConsumedSummaryMismatch(N, *CE, SM, CEMgr))
+ return PD;
+
+ // FIXME: We will eventually need to handle non-statement-based events
+ // (__attribute__((cleanup))).
+ if (!N->getLocation().getAs<StmtPoint>())
+ return nullptr;
+
+ // Check if the type state has changed.
+ const ExplodedNode *PrevNode = N->getFirstPred();
+ ProgramStateRef PrevSt = PrevNode->getState();
+ ProgramStateRef CurrSt = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+
+ const RefVal* CurrT = getRefBinding(CurrSt, Sym);
+ if (!CurrT) return nullptr;
+
+ const RefVal &CurrV = *CurrT;
+ const RefVal *PrevT = getRefBinding(PrevSt, Sym);
+
+ // Create a string buffer to contain all the useful things we want
+ // to tell the user.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ // This is the allocation site since the previous node had no bindings
+ // for this symbol.
+ if (!PrevT) {
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+ if (isa<ObjCIvarRefExpr>(S) &&
+ isSynthesizedAccessor(LCtx->getStackFrame())) {
+ S = LCtx->getStackFrame()->getCallSite();
+ }
+
+ if (isa<ObjCArrayLiteral>(S)) {
+ os << "NSArray literal is an object with a +0 retain count";
+ } else if (isa<ObjCDictionaryLiteral>(S)) {
+ os << "NSDictionary literal is an object with a +0 retain count";
+ } else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
+ if (isNumericLiteralExpression(BL->getSubExpr()))
+ os << "NSNumber literal is an object with a +0 retain count";
+ else {
+ const ObjCInterfaceDecl *BoxClass = nullptr;
+ if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
+ BoxClass = Method->getClassInterface();
+
+ // We should always be able to find the boxing class interface,
+ // but consider this future-proofing.
+ if (BoxClass) {
+ os << *BoxClass << " b";
+ } else {
+ os << "B";
+ }
+
+ os << "oxed expression produces an object with a +0 retain count";
+ }
+ } else if (isa<ObjCIvarRefExpr>(S)) {
+ os << "Object loaded from instance variable";
+ } else {
+ generateDiagnosticsForCallLike(CurrSt, LCtx, CurrV, Sym, S, os);
+ }
+
+ PathDiagnosticLocation Pos(S, SM, N->getLocationContext());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+ }
+
+ // Gather up the effects that were performed on the object at this
+ // program point.
+ bool DeallocSent = false;
+
+ if (N->getLocation().getTag() &&
+ N->getLocation().getTag()->getTagDescription().contains(
+ RetainCountChecker::DeallocTagDescription)) {
+ // We only have summaries attached to nodes after evaluating CallExpr and
+ // ObjCMessageExprs.
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Iterate through the parameter expressions and see if the symbol
+ // was ever passed as an argument.
+ unsigned i = 0;
+
+ for (auto AI=CE->arg_begin(), AE=CE->arg_end(); AI!=AE; ++AI, ++i) {
+
+ // Retrieve the value of the argument. Is it the symbol
+ // we are interested in?
+ if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
+ continue;
+
+ // We have an argument. Get the effect!
+ DeallocSent = true;
+ }
+ } else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const Expr *receiver = ME->getInstanceReceiver()) {
+ if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
+ .getAsLocSymbol() == Sym) {
+ // The symbol we are tracking is the receiver.
+ DeallocSent = true;
+ }
+ }
+ }
+ }
+
+ if (!shouldGenerateNote(os, PrevT, CurrV, DeallocSent))
+ return nullptr;
+
+ if (os.str().empty())
+ return nullptr; // We have nothing to say!
+
+ const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ auto P = std::make_shared<PathDiagnosticEventPiece>(Pos, os.str());
+
+ // Add the range by scanning the children of the statement for any bindings
+ // to Sym.
+ for (const Stmt *Child : S->children())
+ if (const Expr *Exp = dyn_cast_or_null<Expr>(Child))
+ if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
+ P->addRange(Exp->getSourceRange());
+ break;
+ }
+
+ return std::move(P);
+}
+
+static Optional<std::string> describeRegion(const MemRegion *MR) {
+ if (const auto *VR = dyn_cast_or_null<VarRegion>(MR))
+ return std::string(VR->getDecl()->getName());
+ // Once we support more storage locations for bindings,
+ // this would need to be improved.
+ return None;
+}
+
+namespace {
+// Find the first node in the current function context that referred to the
+// tracked symbol and the memory location the value was stored to. Note that the
+// value is only reported if the allocation occurred in the same function as
+// the leak. The function can also return a location context, which should be
+// treated as interesting.
+struct AllocationInfo {
+ const ExplodedNode* N;
+ const MemRegion *R;
+ const LocationContext *InterestingMethodContext;
+ AllocationInfo(const ExplodedNode *InN,
+ const MemRegion *InR,
+ const LocationContext *InInterestingMethodContext) :
+ N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
+};
+} // end anonymous namespace
+
+static AllocationInfo GetAllocationSite(ProgramStateManager &StateMgr,
+ const ExplodedNode *N, SymbolRef Sym) {
+ const ExplodedNode *AllocationNode = N;
+ const ExplodedNode *AllocationNodeInCurrentOrParentContext = N;
+ const MemRegion *FirstBinding = nullptr;
+ const LocationContext *LeakContext = N->getLocationContext();
+
+ // The location context of the init method called on the leaked object, if
+ // available.
+ const LocationContext *InitMethodContext = nullptr;
+
+ while (N) {
+ ProgramStateRef St = N->getState();
+ const LocationContext *NContext = N->getLocationContext();
+
+ if (!getRefBinding(St, Sym))
+ break;
+
+ StoreManager::FindUniqueBinding FB(Sym);
+ StateMgr.iterBindings(St, FB);
+
+ if (FB) {
+ const MemRegion *R = FB.getRegion();
+ // Do not show local variables belonging to a function other than
+ // where the error is reported.
+ if (auto MR = dyn_cast<StackSpaceRegion>(R->getMemorySpace()))
+ if (MR->getStackFrame() == LeakContext->getStackFrame())
+ FirstBinding = R;
+ }
+
+ // AllocationNode is the last node in which the symbol was tracked.
+ AllocationNode = N;
+
+ // AllocationNodeInCurrentOrParentContext is the last node in the current or
+ // parent context in which the symbol was tracked.
+ //
+ // Note that the allocation site might be in the parent context. For example,
+ // the case where an allocation happens in a block that captures a reference
+ // to it and that reference is overwritten/dropped by another call to
+ // the block.
+ if (NContext == LeakContext || NContext->isParentOf(LeakContext))
+ AllocationNodeInCurrentOrParentContext = N;
+
+ // Find the last init that was called on the given symbol and store the
+ // init method's location context.
+ if (!InitMethodContext)
+ if (auto CEP = N->getLocation().getAs<CallEnter>()) {
+ const Stmt *CE = CEP->getCallExpr();
+ if (const auto *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
+ const Stmt *RecExpr = ME->getInstanceReceiver();
+ if (RecExpr) {
+ SVal RecV = St->getSVal(RecExpr, NContext);
+ if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
+ InitMethodContext = CEP->getCalleeContext();
+ }
+ }
+ }
+
+ N = N->getFirstPred();
+ }
+
+ // If we are reporting a leak of the object that was allocated with alloc,
+ // mark its init method as interesting.
+ const LocationContext *InterestingMethodContext = nullptr;
+ if (InitMethodContext) {
+ const ProgramPoint AllocPP = AllocationNode->getLocation();
+ if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
+ if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
+ if (ME->getMethodFamily() == OMF_alloc)
+ InterestingMethodContext = InitMethodContext;
+ }
+
+ // If allocation happened in a function different from the leak node context,
+ // do not report the binding.
+ assert(N && "Could not find allocation node");
+
+ if (AllocationNodeInCurrentOrParentContext &&
+ AllocationNodeInCurrentOrParentContext->getLocationContext() !=
+ LeakContext)
+ FirstBinding = nullptr;
+
+ return AllocationInfo(AllocationNodeInCurrentOrParentContext,
+ FirstBinding,
+ InterestingMethodContext);
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+RefCountReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN, BugReport &BR) {
+ BR.markInteresting(Sym);
+ return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
+}
+
+std::shared_ptr<PathDiagnosticPiece>
+RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN, BugReport &BR) {
+
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BR.markInteresting(Sym);
+
+ // We are reporting a leak. Walk up the graph to get to the first node where
+ // the symbol appeared, and also get the first VarDecl that the tracked
+ // object is stored to.
+ AllocationInfo AllocI = GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+ const MemRegion* FirstBinding = AllocI.R;
+ BR.markInteresting(AllocI.InterestingMethodContext);
+
+ SourceManager& SM = BRC.getSourceManager();
+
+ // Compute an actual location for the leak. Sometimes a leak doesn't
+ // occur at an actual statement (e.g., transition between blocks; end
+ // of function) so we need to walk the graph and compute a real location.
+ const ExplodedNode *LeakN = EndN;
+ PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Object leaked: ";
+
+ Optional<std::string> RegionDescription = describeRegion(FirstBinding);
+ if (RegionDescription) {
+ os << "object allocated and stored into '" << *RegionDescription << '\'';
+ } else {
+ os << "allocated object of type '" << getPrettyTypeName(Sym->getType())
+ << "'";
+ }
+
+ // Get the retain count.
+ const RefVal* RV = getRefBinding(EndN->getState(), Sym);
+ assert(RV);
+
+ if (RV->getKind() == RefVal::ErrorLeakReturned) {
+ // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+ // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
+ // to the caller for NS objects.
+ const Decl *D = &EndN->getCodeDecl();
+
+ os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
+ : " is returned from a function ");
+
+ if (D->hasAttr<CFReturnsNotRetainedAttr>()) {
+ os << "that is annotated as CF_RETURNS_NOT_RETAINED";
+ } else if (D->hasAttr<NSReturnsNotRetainedAttr>()) {
+ os << "that is annotated as NS_RETURNS_NOT_RETAINED";
+ } else if (D->hasAttr<OSReturnsNotRetainedAttr>()) {
+ os << "that is annotated as OS_RETURNS_NOT_RETAINED";
+ } else {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (BRC.getASTContext().getLangOpts().ObjCAutoRefCount) {
+ os << "managed by Automatic Reference Counting";
+ } else {
+ os << "whose name ('" << MD->getSelector().getAsString()
+ << "') does not start with "
+ "'copy', 'mutableCopy', 'alloc' or 'new'."
+ " This violates the naming convention rules"
+ " given in the Memory Management Guide for Cocoa";
+ }
+ } else {
+ const FunctionDecl *FD = cast<FunctionDecl>(D);
+ os << "whose name ('" << *FD
+ << "') does not contain 'Copy' or 'Create'. This violates the naming"
+ " convention rules given in the Memory Management Guide for Core"
+ " Foundation";
+ }
+ }
+ } else {
+ os << " is not referenced later in this execution path and has a retain "
+ "count of +" << RV->getCount();
+ }
+
+ return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+}
+
+RefCountReport::RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ ExplodedNode *n, SymbolRef sym,
+ bool registerVisitor)
+ : BugReport(D, D.getDescription(), n), Sym(sym) {
+ if (registerVisitor)
+ addVisitor(llvm::make_unique<RefCountReportVisitor>(sym));
+}
+
+RefCountReport::RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ ExplodedNode *n, SymbolRef sym,
+ StringRef endText)
+ : BugReport(D, D.getDescription(), endText, n) {
+
+ addVisitor(llvm::make_unique<RefCountReportVisitor>(sym));
+}
+
+void RefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
+ const SourceManager& SMgr = Ctx.getSourceManager();
+
+ if (!sym->getOriginRegion())
+ return;
+
+ auto *Region = dyn_cast<DeclRegion>(sym->getOriginRegion());
+ if (Region) {
+ const Decl *PDecl = Region->getDecl();
+ if (PDecl && isa<ParmVarDecl>(PDecl)) {
+ PathDiagnosticLocation ParamLocation =
+ PathDiagnosticLocation::create(PDecl, SMgr);
+ Location = ParamLocation;
+ UniqueingLocation = ParamLocation;
+ UniqueingDecl = Ctx.getLocationContext()->getDecl();
+ }
+ }
+}
+
+void RefLeakReport::deriveAllocLocation(CheckerContext &Ctx,
+ SymbolRef sym) {
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path. To do this, we need to find
+ // the allocation site of a piece of tracked memory, which we do via a
+ // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
+ // Note that this is *not* the trimmed graph; we are guaranteed, however,
+ // that all ancestor nodes that represent the allocation site have the
+ // same SourceLocation.
+ const ExplodedNode *AllocNode = nullptr;
+
+ const SourceManager& SMgr = Ctx.getSourceManager();
+
+ AllocationInfo AllocI =
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+
+ AllocNode = AllocI.N;
+ AllocBinding = AllocI.R;
+ markInteresting(AllocI.InterestingMethodContext);
+
+ // Get the SourceLocation for the allocation site.
+ // FIXME: This will crash the analyzer if an allocation comes from an
+ // implicit call (ex: a destructor call).
+ // (Currently there are no such allocations in Cocoa, though.)
+ AllocStmt = PathDiagnosticLocation::getStmt(AllocNode);
+
+ if (!AllocStmt) {
+ AllocBinding = nullptr;
+ return;
+ }
+
+ PathDiagnosticLocation AllocLocation =
+ PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
+ AllocNode->getLocationContext());
+ Location = AllocLocation;
+
+ // Set the uniqueing info, which will be used to unique the bug reports. The
+ // leaks should be uniqued on the allocation site.
+ UniqueingLocation = AllocLocation;
+ UniqueingDecl = AllocNode->getLocationContext()->getDecl();
+}
+
+void RefLeakReport::createDescription(CheckerContext &Ctx) {
+ assert(Location.isValid() && UniqueingDecl && UniqueingLocation.isValid());
+ Description.clear();
+ llvm::raw_string_ostream os(Description);
+ os << "Potential leak of an object";
+
+ Optional<std::string> RegionDescription = describeRegion(AllocBinding);
+ if (RegionDescription) {
+ os << " stored into '" << *RegionDescription << '\'';
+ } else {
+
+ // If we can't figure out the name, just supply the type information.
+ os << " of type '" << getPrettyTypeName(Sym->getType()) << "'";
+ }
+}
+
+RefLeakReport::RefLeakReport(RefCountBug &D, const LangOptions &LOpts,
+ ExplodedNode *n, SymbolRef sym,
+ CheckerContext &Ctx)
+ : RefCountReport(D, LOpts, n, sym, false) {
+
+ deriveAllocLocation(Ctx, sym);
+ if (!AllocBinding)
+ deriveParamLocation(Ctx, sym);
+
+ createDescription(Ctx);
+
+ addVisitor(llvm::make_unique<RefLeakReportVisitor>(sym));
+}
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
new file mode 100644
index 000000000000..9f796abe8eae
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -0,0 +1,85 @@
+//== RetainCountDiagnostics.h - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines diagnostics for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_DIAGNOSTICS_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_RETAINCOUNTCHECKER_DIAGNOSTICS_H
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
+
+namespace clang {
+namespace ento {
+namespace retaincountchecker {
+
+class RefCountBug : public BugType {
+protected:
+ RefCountBug(const CheckerBase *checker, StringRef name)
+ : BugType(checker, name, categories::MemoryRefCount) {}
+
+public:
+ virtual const char *getDescription() const = 0;
+
+ virtual bool isLeak() const { return false; }
+};
+
+class RefCountReport : public BugReport {
+protected:
+ SymbolRef Sym;
+
+public:
+ RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ ExplodedNode *n, SymbolRef sym,
+ bool registerVisitor = true);
+
+ RefCountReport(RefCountBug &D, const LangOptions &LOpts,
+ ExplodedNode *n, SymbolRef sym,
+ StringRef endText);
+
+ llvm::iterator_range<ranges_iterator> getRanges() override {
+ const RefCountBug& BugTy = static_cast<RefCountBug&>(getBugType());
+ if (!BugTy.isLeak())
+ return BugReport::getRanges();
+ return llvm::make_range(ranges_iterator(), ranges_iterator());
+ }
+};
+
+class RefLeakReport : public RefCountReport {
+ const MemRegion* AllocBinding;
+ const Stmt *AllocStmt;
+
+ // Finds the function declaration where a leak warning for the parameter
+ // 'sym' should be raised.
+ void deriveParamLocation(CheckerContext &Ctx, SymbolRef sym);
+ // Finds the location where a leak warning for 'sym' should be raised.
+ void deriveAllocLocation(CheckerContext &Ctx, SymbolRef sym);
+ // Produces the description of a leak warning, which is printed on the console.
+ void createDescription(CheckerContext &Ctx);
+
+public:
+ RefLeakReport(RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n,
+ SymbolRef sym, CheckerContext &Ctx);
+
+ PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
+ assert(Location.isValid());
+ return Location;
+ }
+};
+
+} // end namespace retaincountchecker
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 1952715a9b7c..17ef39531628 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index c5e826a84b84..3e0613e8ba68 100644
--- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -87,7 +87,7 @@ static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
auto Report = llvm::make_unique<BugReport>(BT, BT.getDescription(), N);
Report->addRange(RetE->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, TrackingE ? TrackingE : RetE, *Report);
+ bugreporter::trackExpressionValue(N, TrackingE ? TrackingE : RetE, *Report);
C.emitReport(std::move(Report));
}
diff --git a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index 55516a34d1a7..cf03b3c21132 100644
--- a/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -23,7 +23,7 @@
//===----------------------------------------------------------------------===//
//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
@@ -58,13 +58,12 @@ public:
} // end anonymous namespace
-
-using TriBoolTy = Optional<bool>;
-using MemoizationMapTy = llvm::DenseMap<const Stmt *, Optional<TriBoolTy>>;
-
-static TriBoolTy
-seenBeforeRec(const Stmt *Parent, const Stmt *A, const Stmt *B,
- MemoizationMapTy &Memoization) {
+/// \return Whether {@code A} occurs before {@code B} in traversal of
+/// {@code Parent}.
+/// Conceptually a very incomplete/unsound approximation of the happens-before
+/// relationship (A is likely to be evaluated before B),
+/// but useful enough in this case.
+static bool seenBefore(const Stmt *Parent, const Stmt *A, const Stmt *B) {
for (const Stmt *C : Parent->children()) {
if (!C) continue;
@@ -74,26 +73,9 @@ seenBeforeRec(const Stmt *Parent, const Stmt *A, const Stmt *B,
if (C == B)
return false;
- Optional<TriBoolTy> &Cached = Memoization[C];
- if (!Cached)
- Cached = seenBeforeRec(C, A, B, Memoization);
-
- if (Cached->hasValue())
- return Cached->getValue();
+ return seenBefore(C, A, B);
}
-
- return None;
-}
-
-/// \return Whether {@code A} occurs before {@code B} in traversal of
-/// {@code Parent}.
-/// Conceptually a very incomplete/unsound approximation of happens-before
-/// relationship (A is likely to be evaluated before B),
-/// but useful enough in this case.
-static bool seenBefore(const Stmt *Parent, const Stmt *A, const Stmt *B) {
- MemoizationMapTy Memoization;
- TriBoolTy Val = seenBeforeRec(Parent, A, B, Memoization);
- return Val.getValue();
+ return false;
}
static void emitDiagnostics(BoundNodes &Match,
diff --git a/lib/StaticAnalyzer/Checkers/SelectorExtras.h b/lib/StaticAnalyzer/Checkers/SelectorExtras.h
deleted file mode 100644
index b11d070c629b..000000000000
--- a/lib/StaticAnalyzer/Checkers/SelectorExtras.h
+++ /dev/null
@@ -1,46 +0,0 @@
-//=== SelectorExtras.h - Helpers for checkers using selectors -----*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
-
-#include "clang/AST/ASTContext.h"
-
-namespace clang {
-namespace ento {
-
-template <typename... IdentifierInfos>
-static inline Selector getKeywordSelector(ASTContext &Ctx,
- IdentifierInfos *... IIs) {
- static_assert(sizeof...(IdentifierInfos),
- "keyword selectors must have at least one argument");
- SmallVector<IdentifierInfo *, 10> II({&Ctx.Idents.get(IIs)...});
-
- return Ctx.Selectors.getSelector(II.size(), &II[0]);
-}
-
-template <typename... IdentifierInfos>
-static inline void lazyInitKeywordSelector(Selector &Sel, ASTContext &Ctx,
- IdentifierInfos *... IIs) {
- if (!Sel.isNull())
- return;
- Sel = getKeywordSelector(Ctx, IIs...);
-}
-
-static inline void lazyInitNullarySelector(Selector &Sel, ASTContext &Ctx,
- const char *Name) {
- if (!Sel.isNull())
- return;
- Sel = GetNullarySelector(Name, Ctx);
-}
-
-} // end namespace ento
-} // end namespace clang
-
-#endif
diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index ab4b4d3bd91b..819d437e6883 100644
--- a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -15,7 +15,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index e7a20fa03a4a..0f53d826a5f6 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -79,17 +79,17 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
const CompoundLiteralExpr *CL = CR->getLiteralExpr();
os << "stack memory associated with a compound literal "
"declared on line "
- << SM.getExpansionLineNumber(CL->getLocStart()) << " returned to caller";
+ << SM.getExpansionLineNumber(CL->getBeginLoc()) << " returned to caller";
range = CL->getSourceRange();
} else if (const auto *AR = dyn_cast<AllocaRegion>(R)) {
const Expr *ARE = AR->getExpr();
- SourceLocation L = ARE->getLocStart();
+ SourceLocation L = ARE->getBeginLoc();
range = ARE->getSourceRange();
os << "stack memory allocated by call to alloca() on line "
<< SM.getExpansionLineNumber(L);
} else if (const auto *BR = dyn_cast<BlockDataRegion>(R)) {
const BlockDecl *BD = BR->getCodeRegion()->getDecl();
- SourceLocation L = BD->getLocStart();
+ SourceLocation L = BD->getBeginLoc();
range = BD->getSourceRange();
os << "stack-allocated block declared on line "
<< SM.getExpansionLineNumber(L);
diff --git a/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 2f9f5d2d9cf8..6478128ce954 100644
--- a/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -51,7 +51,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index d77975559e3f..92647f032730 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -383,26 +383,26 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
// TODO: Clean up the state.
- for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
- E = SymReaper.dead_end(); I != E; ++I) {
- SymbolRef Sym = *I;
- ProgramStateRef state = C.getState();
- const StreamState *SS = state->get<StreamMap>(Sym);
- if (!SS)
+ const StreamMapTy &Map = state->get<StreamMap>();
+ for (const auto &I: Map) {
+ SymbolRef Sym = I.first;
+ const StreamState &SS = I.second;
+ if (!SymReaper.isDead(Sym) || !SS.isOpened())
continue;
- if (SS->isOpened()) {
- ExplodedNode *N = C.generateErrorNode();
- if (N) {
- if (!BT_ResourceLeak)
- BT_ResourceLeak.reset(new BuiltinBug(
- this, "Resource Leak",
- "Opened File never closed. Potential Resource leak."));
- C.emitReport(llvm::make_unique<BugReport>(
- *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
- }
- }
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT_ResourceLeak)
+ BT_ResourceLeak.reset(
+ new BuiltinBug(this, "Resource Leak",
+ "Opened File never closed. Potential Resource leak."));
+ C.emitReport(llvm::make_unique<BugReport>(
+ *BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
}
}
diff --git a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 2e0529015ca6..3aa8e95d0ad0 100644
--- a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -10,7 +10,7 @@
// This checker can be used for testing how taint data is propagated.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index f4c0edbab3f0..527e371571f1 100644
--- a/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -71,7 +71,6 @@ public:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) override;
};
@@ -95,7 +94,7 @@ public:
REGISTER_SET_WITH_PROGRAMSTATE(DivZeroMap, ZeroState)
std::shared_ptr<PathDiagnosticPiece>
-DivisionBRVisitor::VisitNode(const ExplodedNode *Succ, const ExplodedNode *Pred,
+DivisionBRVisitor::VisitNode(const ExplodedNode *Succ,
BugReporterContext &BRC, BugReport &BR) {
if (Satisfied)
return nullptr;
@@ -180,7 +179,7 @@ void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
}
}
-void TestAfterDivZeroChecker::checkEndFunction(const ReturnStmt *RS,
+void TestAfterDivZeroChecker::checkEndFunction(const ReturnStmt *,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
diff --git a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
index ee185b813611..2f06469bb209 100644
--- a/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -11,7 +11,7 @@
// as it builds the ExplodedGraph.
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -47,7 +47,7 @@ void TraversalDumper::checkBranchCondition(const Stmt *Condition,
// It is mildly evil to print directly to llvm::outs() rather than emitting
// warnings, but this ensures things do not get filtered out by the rest of
// the static analyzer machinery.
- SourceLocation Loc = Parent->getLocStart();
+ SourceLocation Loc = Parent->getBeginLoc();
llvm::outs() << C.getSourceManager().getSpellingLineNumber(Loc) << " "
<< Parent->getStmtClassName() << "\n";
}
diff --git a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index f3d68014224d..5e777803af00 100644
--- a/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -1,4 +1,4 @@
-//== TrustNonnullChecker.cpp - Checker for trusting annotations -*- C++ -*--==//
+//== TrustNonnullChecker.cpp --------- API nullability modeling -*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,20 @@
//
//===----------------------------------------------------------------------===//
//
-// This checker adds an assumption that methods annotated with _Nonnull
+// This checker adds nullability-related assumptions:
+//
+// 1. Methods annotated with _Nonnull
// which come from system headers actually return a non-null pointer.
//
+// 2. NSDictionary key is non-null after the keyword subscript operation
+// on read if and only if the resulting expression is non-null.
+//
+// 3. NSMutableDictionary index is non-null after a write operation.
+//
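+// An illustrative Objective-C fragment for assumption (2) (hypothetical,
+// not part of this patch):
+//
+//   id value = dict[key];   // objectForKeyedSubscript: on an NSDictionary
+//   if (value != nil) {
+//     // Here 'key' is also assumed to be non-nil.
+//   }
+//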
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/Analysis/SelectorExtras.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -22,10 +30,129 @@
using namespace clang;
using namespace ento;
+/// Records implications between symbols.
+/// The semantics is:
+/// (antecedent != 0) => (consequent != 0)
+/// These implications are then read during the evaluation of the assumption,
+/// and the appropriate antecedents are applied.
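+/// For example (illustrative): an entry Ret -> Arg in NonNullImplicationMap
+/// records that, once Ret is constrained to be non-null, the checker may
+/// also constrain Arg to be non-null; the entry is dropped once applied.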
+REGISTER_MAP_WITH_PROGRAMSTATE(NonNullImplicationMap, SymbolRef, SymbolRef)
+
+/// The semantics is:
+/// (antecedent == 0) => (consequent == 0)
+REGISTER_MAP_WITH_PROGRAMSTATE(NullImplicationMap, SymbolRef, SymbolRef)
+
namespace {
-class TrustNonnullChecker : public Checker<check::PostCall> {
+class TrustNonnullChecker : public Checker<check::PostCall,
+ check::PostObjCMessage,
+ check::DeadSymbols,
+ eval::Assume> {
+ // Do not try to iterate over symbols whose complexity exceeds this threshold.
+ static unsigned constexpr ComplexityThreshold = 10;
+ Selector ObjectForKeyedSubscriptSel;
+ Selector ObjectForKeySel;
+ Selector SetObjectForKeyedSubscriptSel;
+ Selector SetObjectForKeySel;
+
+public:
+ TrustNonnullChecker(ASTContext &Ctx)
+ : ObjectForKeyedSubscriptSel(
+ getKeywordSelector(Ctx, "objectForKeyedSubscript")),
+ ObjectForKeySel(getKeywordSelector(Ctx, "objectForKey")),
+ SetObjectForKeyedSubscriptSel(
+ getKeywordSelector(Ctx, "setObject", "forKeyedSubscript")),
+ SetObjectForKeySel(getKeywordSelector(Ctx, "setObject", "forKey")) {}
+
+ ProgramStateRef evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const {
+ const SymbolRef CondS = Cond.getAsSymbol();
+ if (!CondS || CondS->computeComplexity() > ComplexityThreshold)
+ return State;
+
+ for (auto B=CondS->symbol_begin(), E=CondS->symbol_end(); B != E; ++B) {
+ const SymbolRef Antecedent = *B;
+ State = addImplication(Antecedent, State, true);
+ State = addImplication(Antecedent, State, false);
+ }
+
+ return State;
+ }
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ // Only trust annotations for system headers for non-protocols.
+ if (!Call.isInSystemHeader())
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ if (isNonNullPtr(Call, C))
+ if (auto L = Call.getReturnValue().getAs<Loc>())
+ State = State->assume(*L, /*Assumption=*/true);
+
+ C.addTransition(State);
+ }
+
+ void checkPostObjCMessage(const ObjCMethodCall &Msg,
+ CheckerContext &C) const {
+ const ObjCInterfaceDecl *ID = Msg.getReceiverInterface();
+ if (!ID)
+ return;
+
+ ProgramStateRef State = C.getState();
+
+ // Index to setter for NSMutableDictionary is assumed to be non-null,
+ // as an exception is thrown otherwise.
+ if (interfaceHasSuperclass(ID, "NSMutableDictionary") &&
+ (Msg.getSelector() == SetObjectForKeyedSubscriptSel ||
+ Msg.getSelector() == SetObjectForKeySel)) {
+ if (auto L = Msg.getArgSVal(1).getAs<Loc>())
+ State = State->assume(*L, /*Assumption=*/true);
+ }
+
+ // Record an implication: index is non-null if the output is non-null.
+ if (interfaceHasSuperclass(ID, "NSDictionary") &&
+ (Msg.getSelector() == ObjectForKeyedSubscriptSel ||
+ Msg.getSelector() == ObjectForKeySel)) {
+ SymbolRef ArgS = Msg.getArgSVal(0).getAsSymbol();
+ SymbolRef RetS = Msg.getReturnValue().getAsSymbol();
+
+ if (ArgS && RetS) {
+ // Emulate an implication: the argument is non-null if
+ // the return value is non-null.
+ State = State->set<NonNullImplicationMap>(RetS, ArgS);
+
+ // Conversely, when the argument is null, the return value
+ // is definitely null.
+ State = State->set<NullImplicationMap>(ArgS, RetS);
+ }
+ }
+
+ C.addTransition(State);
+ }
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ State = dropDeadFromGDM<NullImplicationMap>(SymReaper, State);
+ State = dropDeadFromGDM<NonNullImplicationMap>(SymReaper, State);
+
+ C.addTransition(State);
+ }
+
private:
+
+ /// \returns State with GDM \p MapName where all dead symbols were
+ /// removed.
+ template <typename MapName>
+ ProgramStateRef dropDeadFromGDM(SymbolReaper &SymReaper,
+ ProgramStateRef State) const {
+ for (const std::pair<SymbolRef, SymbolRef> &P : State->get<MapName>())
+ if (!SymReaper.isLive(P.first) || !SymReaper.isLive(P.second))
+ State = State->remove<MapName>(P.first);
+ return State;
+ }
+
/// \returns Whether we trust the result of the method call to be
/// a non-null pointer.
bool isNonNullPtr(const CallEvent &Call, CheckerContext &C) const {
@@ -66,19 +193,57 @@ private:
return false;
}
-public:
- void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
- // Only trust annotations for system headers for non-protocols.
- if (!Call.isInSystemHeader())
- return;
+ /// \return Whether \p ID is named \p ClassName or has a superclass by that name.
+ bool interfaceHasSuperclass(const ObjCInterfaceDecl *ID,
+ StringRef ClassName) const {
+ if (ID->getIdentifier()->getName() == ClassName)
+ return true;
- ProgramStateRef State = C.getState();
+ if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
+ return interfaceHasSuperclass(Super, ClassName);
- if (isNonNullPtr(Call, C))
- if (auto L = Call.getReturnValue().getAs<Loc>())
- State = State->assume(*L, /*Assumption=*/true);
+ return false;
+ }
- C.addTransition(State);
+
+ /// \return A state with an implication from the map of recorded
+ /// implications applied, if one exists for \p Antecedent.
+ /// If \p Negated is true, reads NonNullImplicationMap and, when
+ /// \p Antecedent is constrained to be non-null, assumes the consequent
+ /// to be non-null as well.
+ /// Otherwise reads NullImplicationMap and, when \p Antecedent is
+ /// constrained to be null, assumes the consequent to be null.
+ ProgramStateRef addImplication(SymbolRef Antecedent,
+ ProgramStateRef InputState,
+ bool Negated) const {
+ if (!InputState)
+ return nullptr;
+ SValBuilder &SVB = InputState->getStateManager().getSValBuilder();
+ const SymbolRef *Consequent =
+ Negated ? InputState->get<NonNullImplicationMap>(Antecedent)
+ : InputState->get<NullImplicationMap>(Antecedent);
+ if (!Consequent)
+ return InputState;
+
+ SVal AntecedentV = SVB.makeSymbolVal(Antecedent);
+ ProgramStateRef State = InputState;
+
+ if ((Negated && InputState->isNonNull(AntecedentV).isConstrainedTrue())
+ || (!Negated && InputState->isNull(AntecedentV).isConstrainedTrue())) {
+ SVal ConsequentS = SVB.makeSymbolVal(*Consequent);
+ State = InputState->assume(ConsequentS.castAs<DefinedSVal>(), Negated);
+ if (!State)
+ return nullptr;
+
+ // Drop implications from the map.
+ if (Negated) {
+ State = State->remove<NonNullImplicationMap>(Antecedent);
+ State = State->remove<NullImplicationMap>(*Consequent);
+ } else {
+ State = State->remove<NullImplicationMap>(Antecedent);
+ State = State->remove<NonNullImplicationMap>(*Consequent);
+ }
+ }
+
+ return State;
}
};
@@ -86,5 +251,5 @@ public:
void ento::registerTrustNonnullChecker(CheckerManager &Mgr) {
- Mgr.registerChecker<TrustNonnullChecker>();
+ Mgr.registerChecker<TrustNonnullChecker>(Mgr.getASTContext());
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index 934ee63318fa..d7fad4e475ab 100644
--- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -98,7 +98,7 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
// Emit the bug report.
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
- bugreporter::trackNullOrUndefValue(N, Ex, *R);
+ bugreporter::trackExpressionValue(N, Ex, *R);
R->addRange(Ex->getSourceRange());
Ctx.emitReport(std::move(R));
diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 6a93c10c7644..8a625227b81e 100644
--- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index b9a93bedca2e..624cff6048fd 100644
--- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -69,6 +69,7 @@ static bool isLeftShiftResultUnrepresentable(const BinaryOperator *B,
ProgramStateRef State = C.getState();
const llvm::APSInt *LHS = SB.getKnownValue(State, C.getSVal(B->getLHS()));
const llvm::APSInt *RHS = SB.getKnownValue(State, C.getSVal(B->getRHS()));
+ assert(LHS && RHS && "Values unknown, inconsistent state");
return (unsigned)RHS->getZExtValue() > LHS->countLeadingZeros();
}
@@ -122,6 +123,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
<< ((B->getOpcode() == BinaryOperatorKind::BO_Shl) ? "left"
: "right")
<< " shift is undefined because the right operand is negative";
+ Ex = B->getRHS();
} else if ((B->getOpcode() == BinaryOperatorKind::BO_Shl ||
B->getOpcode() == BinaryOperatorKind::BO_Shr) &&
isShiftOverflow(B, C)) {
@@ -130,6 +132,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
<< ((B->getOpcode() == BinaryOperatorKind::BO_Shl) ? "left"
: "right")
<< " shift is undefined due to shifting by ";
+ Ex = B->getRHS();
SValBuilder &SB = C.getSValBuilder();
const llvm::APSInt *I =
@@ -147,6 +150,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
C.isNegative(B->getLHS())) {
OS << "The result of the left shift is undefined because the left "
"operand is negative";
+ Ex = B->getLHS();
} else if (B->getOpcode() == BinaryOperatorKind::BO_Shl &&
isLeftShiftResultUnrepresentable(B, C)) {
ProgramStateRef State = C.getState();
@@ -160,6 +164,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
<< "\', which is unrepresentable in the unsigned version of "
<< "the return type \'" << B->getLHS()->getType().getAsString()
<< "\'";
+ Ex = B->getLHS();
} else {
OS << "The result of the '"
<< BinaryOperator::getOpcodeStr(B->getOpcode())
@@ -169,10 +174,10 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
auto report = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, Ex, *report);
+ bugreporter::trackExpressionValue(N, Ex, *report);
}
else
- bugreporter::trackNullOrUndefValue(N, B, *report);
+ bugreporter::trackExpressionValue(N, B, *report);
C.emitReport(std::move(report));
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index fe07eafd281f..1d78d7cebd67 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -55,7 +55,7 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
// Generate a report for this bug.
auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+ bugreporter::trackExpressionValue(N, A->getIdx(), *R);
C.emitReport(std::move(R));
}
diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 2ef6855ba6b7..8e10bfdd2f3c 100644
--- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -112,7 +112,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
auto R = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (ex) {
R->addRange(ex->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, ex, *R);
+ bugreporter::trackExpressionValue(N, ex, *R);
}
C.emitReport(std::move(R));
}
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
new file mode 100644
index 000000000000..c3291a21c164
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
@@ -0,0 +1,349 @@
+//===----- UninitializedObject.h ---------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper classes for UninitializedObjectChecker and
+// documentation about the logic of it.
+//
+// The checker reports uninitialized fields in objects created after a
+// constructor call.
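+//
+// For example (an illustrative case, not from the original header):
+//
+//   struct S {
+//     int a, b;
+//     S() : a(0) {} // 'b' is never initialized
+//   };
+//
+// Constructing an S object is reported, with a note pointing at field 'b'.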
+//
+// This checker has several options:
+// - "Pedantic" (boolean). If its not set or is set to false, the checker
+// won't emit warnings for objects that don't have at least one initialized
+// field. This may be set with
+//
+// `-analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true`.
+//
+// - "NotesAsWarnings" (boolean). If set to true, the checker will emit a
+// warning for each uninitialized field, as opposed to emitting one warning
+// per constructor call and listing the uninitialized fields that belong
+// to it in notes. Defaults to false.
+//
+// `-analyzer-config \
+// alpha.cplusplus.UninitializedObject:NotesAsWarnings=true`.
+//
+// - "CheckPointeeInitialization" (boolean). If set to false, the checker will
+// not analyze the pointee of pointer/reference fields, and will only check
+// whether the object itself is initialized. Defaults to false.
+//
+// `-analyzer-config \
+// alpha.cplusplus.UninitializedObject:CheckPointeeInitialization=true`.
+//
+// - "IgnoreRecordsWithField" (string). If supplied, the checker will not
+// analyze structures that have a field with a name or type name that
+// matches the given pattern. Defaults to "".
+//
+// `-analyzer-config \
+// alpha.cplusplus.UninitializedObject:IgnoreRecordsWithField="[Tt]ag|[Kk]ind"`.
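+//
+// A hypothetical invocation combining two of these options (the
+// `-analyzer-config` option accepts comma-separated key=value pairs):
+//
+// `-analyzer-checker=alpha.cplusplus.UninitializedObject \
+// -analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true,\
+// alpha.cplusplus.UninitializedObject:CheckPointeeInitialization=true`.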
+//
+// TODO: With some clever heuristics, some pointers should be dereferenced
+// by default. For example, if the pointee is constructed within the
+// constructor call, it's reasonable to say that no external object
+// references it, and we wouldn't generate multiple reports on the same
+// pointee.
+//
+// Most of the following methods, as well as the checker itself, are defined in
+// UninitializedObjectChecker.cpp.
+//
+// Some methods are implemented in UninitializedPointee.cpp, to reduce the
+// complexity of the main checker file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_UNINITIALIZEDOBJECT_H
+#define LLVM_CLANG_STATICANALYZER_UNINITIALIZEDOBJECT_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+namespace clang {
+namespace ento {
+
+struct UninitObjCheckerOptions {
+ bool IsPedantic = false;
+ bool ShouldConvertNotesToWarnings = false;
+ bool CheckPointeeInitialization = false;
+ std::string IgnoredRecordsWithFieldPattern;
+};
+
+/// A lightweight polymorphic wrapper around FieldRegion *. We'll use this
+/// interface to store additional information about fields. As described
+/// later, a list of these objects (i.e. "fieldchain") will be constructed and
+/// used for printing note messages should an uninitialized value be found.
+class FieldNode {
+protected:
+ const FieldRegion *FR;
+
+ /// FieldNodes are never meant to be created on the heap, see
+ /// FindUninitializedFields::addFieldToUninits().
+ /* non-virtual */ ~FieldNode() = default;
+
+public:
+ FieldNode(const FieldRegion *FR) : FR(FR) {}
+
+ // We'll delete all of these special member functions to force the users of
+ // this interface to only store references to FieldNode objects in containers.
+ FieldNode() = delete;
+ FieldNode(const FieldNode &) = delete;
+ FieldNode(FieldNode &&) = delete;
+ FieldNode &operator=(const FieldNode &) = delete;
+ FieldNode &operator=(const FieldNode &&) = delete;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddPointer(this); }
+
+ /// Helper method for uniqueing.
+ bool isSameRegion(const FieldRegion *OtherFR) const {
+    // Special FieldNode descendants may wrap null pointers (for example if
+    // they describe a special relationship between two elements of the
+    // fieldchain) -- we don't want to unique these objects.
+ if (FR == nullptr)
+ return false;
+
+ return FR == OtherFR;
+ }
+
+ const FieldRegion *getRegion() const { return FR; }
+ const FieldDecl *getDecl() const {
+ assert(FR);
+ return FR->getDecl();
+ }
+
+ // When a fieldchain is printed, it will have the following format (without
+ // newline, indices are in order of insertion, from 1 to n):
+ //
+ // <note_message_n>'<prefix_n><prefix_n-1>...<prefix_1>
+ // this-><node_1><separator_1><node_2><separator_2>...<node_n>'
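+  //
+  // For instance, a chain of two RegularField nodes (a FieldNode descendant
+  // defined in UninitializedObjectChecker.cpp) for fields 'b' and 'x' prints
+  // as:
+  //
+  //   uninitialized field 'this->b.x'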
+
+ /// If this is the last element of the fieldchain, this method will print the
+ /// note message associated with it.
+ /// The note message should state something like "uninitialized field" or
+ /// "uninitialized pointee" etc.
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const = 0;
+
+ /// Print any prefixes before the fieldchain. Could contain casts, etc.
+ virtual void printPrefix(llvm::raw_ostream &Out) const = 0;
+
+ /// Print the node. Should contain the name of the field stored in FR.
+ virtual void printNode(llvm::raw_ostream &Out) const = 0;
+
+ /// Print the separator. For example, fields may be separated with '.' or
+ /// "->".
+ virtual void printSeparator(llvm::raw_ostream &Out) const = 0;
+
+ virtual bool isBase() const { return false; }
+};
+
+/// Returns Field's name. This is a helper function to get the correct name
+/// even if Field is a captured lambda variable.
+std::string getVariableName(const FieldDecl *Field);
+
+/// Represents a field chain. A field chain is a list of fields where the first
+/// element of the chain is the object under checking (not stored); every other
+/// element is a field, and the element that precedes it is the object that
+/// contains it.
+///
+/// Note that this class is immutable (essentially a wrapper around an
+/// ImmutableList); new FieldChainInfo objects may be created by member
+/// functions such as add() and replaceHead().
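+///
+/// A minimal usage sketch (hypothetical; 'Factory' and 'FR' stand for an
+/// existing FieldChain::Factory and FieldRegion, and RegularField is one of
+/// the FieldNode descendants defined in the checker's .cpp files):
+///
+///   FieldChainInfo Chain(Factory);
+///   FieldChainInfo Longer = Chain.add(RegularField(FR)); // Chain is unchanged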
+class FieldChainInfo {
+public:
+ using FieldChain = llvm::ImmutableList<const FieldNode &>;
+
+private:
+ FieldChain::Factory &ChainFactory;
+ FieldChain Chain;
+
+ FieldChainInfo(FieldChain::Factory &F, FieldChain NewChain)
+ : FieldChainInfo(F) {
+ Chain = NewChain;
+ }
+
+public:
+ FieldChainInfo() = delete;
+ FieldChainInfo(FieldChain::Factory &F) : ChainFactory(F) {}
+ FieldChainInfo(const FieldChainInfo &Other) = default;
+
+ /// Constructs a new FieldChainInfo object with \p FN appended.
+ template <class FieldNodeT> FieldChainInfo add(const FieldNodeT &FN);
+
+ /// Constructs a new FieldChainInfo object with \p FN as the new head of the
+ /// list.
+ template <class FieldNodeT> FieldChainInfo replaceHead(const FieldNodeT &FN);
+
+ bool contains(const FieldRegion *FR) const;
+ bool isEmpty() const { return Chain.isEmpty(); }
+
+ const FieldNode &getHead() const { return Chain.getHead(); }
+ const FieldRegion *getUninitRegion() const { return getHead().getRegion(); }
+
+ void printNoteMsg(llvm::raw_ostream &Out) const;
+};
+
+using UninitFieldMap = std::map<const FieldRegion *, llvm::SmallString<50>>;
+
+/// Searches for and stores uninitialized fields in a non-union object.
+class FindUninitializedFields {
+ ProgramStateRef State;
+ const TypedValueRegion *const ObjectR;
+
+ const UninitObjCheckerOptions Opts;
+ bool IsAnyFieldInitialized = false;
+
+ FieldChainInfo::FieldChain::Factory ChainFactory;
+
+ /// A map for assigning uninitialized regions to note messages. For example,
+ ///
+ /// struct A {
+ /// int x;
+ /// };
+ ///
+ /// A a;
+ ///
+ /// After analyzing `a`, the map will contain a pair for `a.x`'s region and
+  /// the note message "uninitialized field 'this->x'".
+ UninitFieldMap UninitFields;
+
+public:
+  /// Constructs the FindUninitializedFields object, and searches for and stores
+ /// uninitialized fields in R.
+ FindUninitializedFields(ProgramStateRef State,
+ const TypedValueRegion *const R,
+ const UninitObjCheckerOptions &Opts);
+
+  /// Returns the modified state and a map of (uninitialized region,
+ /// note message) pairs.
+ std::pair<ProgramStateRef, const UninitFieldMap &> getResults() {
+ return {State, UninitFields};
+ }
+
+ /// Returns whether the analyzed region contains at least one initialized
+ /// field. Note that this includes subfields as well, not just direct ones,
+ /// and will return false if an uninitialized pointee is found with
+ /// CheckPointeeInitialization enabled.
+ bool isAnyFieldInitialized() { return IsAnyFieldInitialized; }
+
+private:
+ // For the purposes of this checker, we'll regard the analyzed region as a
+ // directed tree, where
+ // * the root is the object under checking
+ // * every node is an object that is
+ // - a union
+ // - a non-union record
+ // - dereferenceable (see isDereferencableType())
+ // - an array
+ // - of a primitive type (see isPrimitiveType())
+ // * the parent of each node is the object that contains it
+ // * every leaf is an array, a primitive object, a nullptr or an undefined
+ // pointer.
+ //
+ // Example:
+ //
+ // struct A {
+ // struct B {
+ // int x, y = 0;
+ // };
+ // B b;
+ // int *iptr = new int;
+ // B* bptr;
+ //
+ // A() {}
+ // };
+ //
+ // The directed tree:
+ //
+ // ->x
+ // /
+ // ->b--->y
+ // /
+ // A-->iptr->(int value)
+ // \
+ // ->bptr
+ //
+ // From this we'll construct a vector of fieldchains, where each fieldchain
+ // represents an uninitialized field. An uninitialized field may be a
+ // primitive object, a pointer, a pointee or a union without a single
+ // initialized field.
+ // In the above example, for the default constructor call we'll end up with
+ // these fieldchains:
+ //
+ // this->b.x
+ // this->iptr (pointee uninit)
+ // this->bptr (pointer uninit)
+ //
+ // We'll traverse each node of the above graph with the appropriate one of
+ // these methods:
+
+ /// Checks the region of a union object, and returns true if no field is
+ /// initialized within the region.
+ bool isUnionUninit(const TypedValueRegion *R);
+
+ /// Checks a region of a non-union object, and returns true if an
+ /// uninitialized field is found within the region.
+ bool isNonUnionUninit(const TypedValueRegion *R, FieldChainInfo LocalChain);
+
+ /// Checks a region of a pointer or reference object, and returns true if the
+ /// ptr/ref object itself or any field within the pointee's region is
+ /// uninitialized.
+ bool isDereferencableUninit(const FieldRegion *FR, FieldChainInfo LocalChain);
+
+ /// Returns true if the value of a primitive object is uninitialized.
+ bool isPrimitiveUninit(const SVal &V);
+
+ // Note that we don't have a method for arrays -- the elements of an array are
+ // often left uninitialized intentionally even when it is of a C++ record
+ // type, so we'll assume that an array is always initialized.
+ // TODO: Add a support for nonloc::LocAsInteger.
+
+ /// Processes LocalChain and attempts to insert it into UninitFields. Returns
+ /// true on success. Also adds the head of the list and \p PointeeR (if
+ /// supplied) to the GDM as already analyzed objects.
+ ///
+ /// Since this class analyzes regions with recursion, we'll only store
+ /// references to temporary FieldNode objects created on the stack. This means
+ /// that after analyzing a leaf of the directed tree described above, the
+ /// elements LocalChain references will be destructed, so we can't store it
+ /// directly.
+ bool addFieldToUninits(FieldChainInfo LocalChain,
+ const MemRegion *PointeeR = nullptr);
+};
+
+/// Returns true if T is a primitive type. An object of a primitive type needs
+/// no deeper analysis than checking whether its value is undefined.
+inline bool isPrimitiveType(const QualType &T) {
+ return T->isBuiltinType() || T->isEnumeralType() ||
+ T->isMemberPointerType() || T->isBlockPointerType() ||
+ T->isFunctionType();
+}
+
+inline bool isDereferencableType(const QualType &T) {
+ return T->isAnyPointerType() || T->isReferenceType();
+}
+
+// Template method definitions.
+
+template <class FieldNodeT>
+inline FieldChainInfo FieldChainInfo::add(const FieldNodeT &FN) {
+ assert(!contains(FN.getRegion()) &&
+ "Can't add a field that is already a part of the "
+ "fieldchain! Is this a cyclic reference?");
+
+ FieldChainInfo NewChain = *this;
+ NewChain.Chain = ChainFactory.add(FN, Chain);
+ return NewChain;
+}
+
+template <class FieldNodeT>
+inline FieldChainInfo FieldChainInfo::replaceHead(const FieldNodeT &FN) {
+ FieldChainInfo NewChain(ChainFactory, Chain.getTail());
+ return NewChain.add(FN);
+}
+
+} // end of namespace ento
+} // end of namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_UNINITIALIZEDOBJECT_H
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
new file mode 100644
index 000000000000..208e303e8295
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -0,0 +1,538 @@
+//===----- UninitializedObjectChecker.cpp ------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that reports uninitialized fields in objects
+// created after a constructor call.
+//
+// To read about command line options and how the checker works, refer to the
+// top of the file and inline comments in UninitializedObject.h.
+//
+// Some of the logic is implemented in UninitializedPointee.cpp, to reduce the
+// complexity of this file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "UninitializedObject.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+
+using namespace clang;
+using namespace clang::ento;
+
+/// We'll mark fields (and pointees of fields) that are confirmed to be
+/// uninitialized as already analyzed.
+REGISTER_SET_WITH_PROGRAMSTATE(AnalyzedRegions, const MemRegion *)
+
+namespace {
+
+class UninitializedObjectChecker
+ : public Checker<check::EndFunction, check::DeadSymbols> {
+ std::unique_ptr<BuiltinBug> BT_uninitField;
+
+public:
+ // The fields of this struct will be initialized when registering the checker.
+ UninitObjCheckerOptions Opts;
+
+ UninitializedObjectChecker()
+ : BT_uninitField(new BuiltinBug(this, "Uninitialized fields")) {}
+
+ void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+};
+
+/// A basic field type that is not a pointer or a reference; its dynamic and
+/// static types are the same.
+class RegularField final : public FieldNode {
+public:
+ RegularField(const FieldRegion *FR) : FieldNode(FR) {}
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ Out << "uninitialized field ";
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << getVariableName(getDecl());
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ Out << '.';
+ }
+};
+
+/// Represents that the FieldNode that comes after this is declared in a base
+/// of the previous FieldNode. As such, this descendant doesn't wrap a
+/// FieldRegion, and is purely a tool to describe a relation between two other
+/// FieldRegion wrapping descendants.
+class BaseClass final : public FieldNode {
+ const QualType BaseClassT;
+
+public:
+ BaseClass(const QualType &T) : FieldNode(nullptr), BaseClassT(T) {
+ assert(!T.isNull());
+ assert(T->getAsCXXRecordDecl());
+ }
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ llvm_unreachable("This node can never be the final node in the "
+ "fieldchain!");
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << BaseClassT->getAsCXXRecordDecl()->getName() << "::";
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {}
+
+ virtual bool isBase() const override { return true; }
+};
+
+} // end of anonymous namespace
+
+// Utility function declarations.
+
+/// Returns the region that was constructed by CtorDecl, or nullptr if that
+/// isn't possible.
+static const TypedValueRegion *
+getConstructedRegion(const CXXConstructorDecl *CtorDecl,
+ CheckerContext &Context);
+
+/// Checks whether the object constructed by \p Ctor will be analyzed later
+/// (e.g. if the object is a field of another object, in which case we'd check
+/// it multiple times).
+static bool willObjectBeAnalyzedLater(const CXXConstructorDecl *Ctor,
+ CheckerContext &Context);
+
+/// Checks whether RD contains a field with a name or type name that matches
+/// \p Pattern.
+static bool shouldIgnoreRecord(const RecordDecl *RD, StringRef Pattern);
+
+//===----------------------------------------------------------------------===//
+// Methods for UninitializedObjectChecker.
+//===----------------------------------------------------------------------===//
+
+void UninitializedObjectChecker::checkEndFunction(
+ const ReturnStmt *RS, CheckerContext &Context) const {
+
+ const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
+ Context.getLocationContext()->getDecl());
+ if (!CtorDecl)
+ return;
+
+ if (!CtorDecl->isUserProvided())
+ return;
+
+ if (CtorDecl->getParent()->isUnion())
+ return;
+
+ // This avoids essentially the same error being reported multiple times.
+ if (willObjectBeAnalyzedLater(CtorDecl, Context))
+ return;
+
+ const TypedValueRegion *R = getConstructedRegion(CtorDecl, Context);
+ if (!R)
+ return;
+
+ FindUninitializedFields F(Context.getState(), R, Opts);
+
+ std::pair<ProgramStateRef, const UninitFieldMap &> UninitInfo =
+ F.getResults();
+
+ ProgramStateRef UpdatedState = UninitInfo.first;
+ const UninitFieldMap &UninitFields = UninitInfo.second;
+
+ if (UninitFields.empty()) {
+ Context.addTransition(UpdatedState);
+ return;
+ }
+
+ // There are uninitialized fields in the record.
+
+ ExplodedNode *Node = Context.generateNonFatalErrorNode(UpdatedState);
+ if (!Node)
+ return;
+
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const Stmt *CallSite = Context.getStackFrame()->getCallSite();
+ if (CallSite)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ CallSite, Context.getSourceManager(), Node->getLocationContext());
+
+ // For Plist consumers that don't support notes just yet, we'll convert notes
+ // to warnings.
+ if (Opts.ShouldConvertNotesToWarnings) {
+ for (const auto &Pair : UninitFields) {
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
+ Node->getLocationContext()->getDecl());
+ Context.emitReport(std::move(Report));
+ }
+ return;
+ }
+
+ SmallString<100> WarningBuf;
+ llvm::raw_svector_ostream WarningOS(WarningBuf);
+ WarningOS << UninitFields.size() << " uninitialized field"
+ << (UninitFields.size() == 1 ? "" : "s")
+ << " at the end of the constructor call";
+
+ auto Report = llvm::make_unique<BugReport>(
+ *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
+ Node->getLocationContext()->getDecl());
+
+ for (const auto &Pair : UninitFields) {
+ Report->addNote(Pair.second,
+ PathDiagnosticLocation::create(Pair.first->getDecl(),
+ Context.getSourceManager()));
+ }
+ Context.emitReport(std::move(Report));
+}
+
+void UninitializedObjectChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ for (const MemRegion *R : State->get<AnalyzedRegions>()) {
+ if (!SR.isLiveRegion(R))
+ State = State->remove<AnalyzedRegions>(R);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for FindUninitializedFields.
+//===----------------------------------------------------------------------===//
+
+FindUninitializedFields::FindUninitializedFields(
+ ProgramStateRef State, const TypedValueRegion *const R,
+ const UninitObjCheckerOptions &Opts)
+ : State(State), ObjectR(R), Opts(Opts) {
+
+ isNonUnionUninit(ObjectR, FieldChainInfo(ChainFactory));
+
+ // In non-pedantic mode, if ObjectR doesn't contain a single initialized
+ // field, we'll assume that Object was intentionally left uninitialized.
+ if (!Opts.IsPedantic && !isAnyFieldInitialized())
+ UninitFields.clear();
+}
+
+bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain,
+ const MemRegion *PointeeR) {
+ const FieldRegion *FR = Chain.getUninitRegion();
+
+ assert((PointeeR || !isDereferencableType(FR->getDecl()->getType())) &&
+ "One must also pass the pointee region as a parameter for "
+ "dereferenceable fields!");
+
+ if (State->contains<AnalyzedRegions>(FR))
+ return false;
+
+ if (PointeeR) {
+ if (State->contains<AnalyzedRegions>(PointeeR)) {
+ return false;
+ }
+ State = State->add<AnalyzedRegions>(PointeeR);
+ }
+
+ State = State->add<AnalyzedRegions>(FR);
+
+ if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
+ FR->getDecl()->getLocation()))
+ return false;
+
+ UninitFieldMap::mapped_type NoteMsgBuf;
+ llvm::raw_svector_ostream OS(NoteMsgBuf);
+ Chain.printNoteMsg(OS);
+ return UninitFields.insert({FR, std::move(NoteMsgBuf)}).second;
+}
+
+bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
+ FieldChainInfo LocalChain) {
+ assert(R->getValueType()->isRecordType() &&
+ !R->getValueType()->isUnionType() &&
+ "This method only checks non-union record objects!");
+
+ const RecordDecl *RD = R->getValueType()->getAsRecordDecl()->getDefinition();
+
+ if (!RD) {
+ IsAnyFieldInitialized = true;
+ return true;
+ }
+
+ if (!Opts.IgnoredRecordsWithFieldPattern.empty() &&
+ shouldIgnoreRecord(RD, Opts.IgnoredRecordsWithFieldPattern)) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ bool ContainsUninitField = false;
+
+ // Are all of this non-union's fields initialized?
+ for (const FieldDecl *I : RD->fields()) {
+
+ const auto FieldVal =
+ State->getLValue(I, loc::MemRegionVal(R)).castAs<loc::MemRegionVal>();
+ const auto *FR = FieldVal.getRegionAs<FieldRegion>();
+ QualType T = I->getType();
+
+ // If LocalChain already contains FR, then we encountered a cyclic
+ // reference. In this case, region FR is already under checking at an
+ // earlier node in the directed tree.
+ if (LocalChain.contains(FR))
+ return false;
+
+ if (T->isStructureOrClassType()) {
+ if (isNonUnionUninit(FR, LocalChain.add(RegularField(FR))))
+ ContainsUninitField = true;
+ continue;
+ }
+
+ if (T->isUnionType()) {
+ if (isUnionUninit(FR)) {
+ if (addFieldToUninits(LocalChain.add(RegularField(FR))))
+ ContainsUninitField = true;
+ } else
+ IsAnyFieldInitialized = true;
+ continue;
+ }
+
+ if (T->isArrayType()) {
+ IsAnyFieldInitialized = true;
+ continue;
+ }
+
+ SVal V = State->getSVal(FieldVal);
+
+ if (isDereferencableType(T) || V.getAs<nonloc::LocAsInteger>()) {
+ if (isDereferencableUninit(FR, LocalChain))
+ ContainsUninitField = true;
+ continue;
+ }
+
+ if (isPrimitiveType(T)) {
+ if (isPrimitiveUninit(V)) {
+ if (addFieldToUninits(LocalChain.add(RegularField(FR))))
+ ContainsUninitField = true;
+ }
+ continue;
+ }
+
+ llvm_unreachable("All cases are handled!");
+ }
+
+ // Checking bases. The checker will regard inherited data members as direct
+ // fields.
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ if (!CXXRD)
+ return ContainsUninitField;
+
+ for (const CXXBaseSpecifier &BaseSpec : CXXRD->bases()) {
+ const auto *BaseRegion = State->getLValue(BaseSpec, R)
+ .castAs<loc::MemRegionVal>()
+ .getRegionAs<TypedValueRegion>();
+
+ // If the head of the list is also a BaseClass, we'll overwrite it to avoid
+ // note messages like 'this->A::B::x'.
+ if (!LocalChain.isEmpty() && LocalChain.getHead().isBase()) {
+ if (isNonUnionUninit(BaseRegion, LocalChain.replaceHead(
+ BaseClass(BaseSpec.getType()))))
+ ContainsUninitField = true;
+ } else {
+ if (isNonUnionUninit(BaseRegion,
+ LocalChain.add(BaseClass(BaseSpec.getType()))))
+ ContainsUninitField = true;
+ }
+ }
+
+ return ContainsUninitField;
+}
+
+bool FindUninitializedFields::isUnionUninit(const TypedValueRegion *R) {
+ assert(R->getValueType()->isUnionType() &&
+ "This method only checks union objects!");
+ // TODO: Implement support for union fields.
+ return false;
+}
+
+bool FindUninitializedFields::isPrimitiveUninit(const SVal &V) {
+ if (V.isUndef())
+ return true;
+
+ IsAnyFieldInitialized = true;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for FieldChainInfo.
+//===----------------------------------------------------------------------===//
+
+bool FieldChainInfo::contains(const FieldRegion *FR) const {
+ for (const FieldNode &Node : Chain) {
+ if (Node.isSameRegion(FR))
+ return true;
+ }
+ return false;
+}
+
+/// Prints every element except the last to `Out`. Since ImmutableLists store
+/// elements in reverse order, and have no reverse iterators, we use a
+/// recursive function to print the fieldchain correctly. The last element in
+/// the chain is printed by `FieldChainInfo::printNoteMsg`.
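+/// As an illustration, for a chain that prints as 'this->a.b.c' (stored in
+/// reverse as [c, b, a]), printTail emits "a.b." and printNoteMsg appends the
+/// final "c".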
+static void printTail(llvm::raw_ostream &Out,
+ const FieldChainInfo::FieldChain L);
+
+// FIXME: This function constructs an incorrect string in the following case:
+//
+// struct Base { int x; };
+// struct D1 : Base {}; struct D2 : Base {};
+//
+// struct MostDerived : D1, D2 {
+// MostDerived() {}
+// }
+//
+// A call to MostDerived::MostDerived() will cause two notes that say
+// "uninitialized field 'this->x'", but we can't refer to 'x' directly,
+// we need an explicit namespace resolution whether the uninit field was
+// 'D1::x' or 'D2::x'.
+void FieldChainInfo::printNoteMsg(llvm::raw_ostream &Out) const {
+ if (Chain.isEmpty())
+ return;
+
+ const FieldNode &LastField = getHead();
+
+ LastField.printNoteMsg(Out);
+ Out << '\'';
+
+ for (const FieldNode &Node : Chain)
+ Node.printPrefix(Out);
+
+ Out << "this->";
+ printTail(Out, Chain.getTail());
+ LastField.printNode(Out);
+ Out << '\'';
+}
+
+static void printTail(llvm::raw_ostream &Out,
+ const FieldChainInfo::FieldChain L) {
+ if (L.isEmpty())
+ return;
+
+ printTail(Out, L.getTail());
+
+ L.getHead().printNode(Out);
+ L.getHead().printSeparator(Out);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static const TypedValueRegion *
+getConstructedRegion(const CXXConstructorDecl *CtorDecl,
+ CheckerContext &Context) {
+
+ Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl,
+ Context.getStackFrame());
+
+ SVal ObjectV = Context.getState()->getSVal(ThisLoc);
+
+ auto *R = ObjectV.getAsRegion()->getAs<TypedValueRegion>();
+ if (R && !R->getValueType()->getAsCXXRecordDecl())
+ return nullptr;
+
+ return R;
+}
+
+static bool willObjectBeAnalyzedLater(const CXXConstructorDecl *Ctor,
+ CheckerContext &Context) {
+
+ const TypedValueRegion *CurrRegion = getConstructedRegion(Ctor, Context);
+ if (!CurrRegion)
+ return false;
+
+ const LocationContext *LC = Context.getLocationContext();
+ while ((LC = LC->getParent())) {
+
+ // If \p Ctor was called by another constructor.
+ const auto *OtherCtor = dyn_cast<CXXConstructorDecl>(LC->getDecl());
+ if (!OtherCtor)
+ continue;
+
+ const TypedValueRegion *OtherRegion =
+ getConstructedRegion(OtherCtor, Context);
+ if (!OtherRegion)
+ continue;
+
+ // If the CurrRegion is a subregion of OtherRegion, it will be analyzed
+ // during the analysis of OtherRegion.
+ if (CurrRegion->isSubRegionOf(OtherRegion))
+ return true;
+ }
+
+ return false;
+}
+
+static bool shouldIgnoreRecord(const RecordDecl *RD, StringRef Pattern) {
+ llvm::Regex R(Pattern);
+
+ for (const FieldDecl *FD : RD->fields()) {
+ if (R.match(FD->getType().getAsString()))
+ return true;
+ if (R.match(FD->getName()))
+ return true;
+ }
+
+ return false;
+}
+
+std::string clang::ento::getVariableName(const FieldDecl *Field) {
+ // If Field is a captured lambda variable, Field->getName() will return with
+  // an empty string. We can, however, acquire its name from the lambda's
+ // captures.
+ const auto *CXXParent = dyn_cast<CXXRecordDecl>(Field->getParent());
+
+ if (CXXParent && CXXParent->isLambda()) {
+ assert(CXXParent->captures_begin());
+ auto It = CXXParent->captures_begin() + Field->getFieldIndex();
+
+ if (It->capturesVariable())
+ return llvm::Twine("/*captured variable*/" +
+ It->getCapturedVar()->getName())
+ .str();
+
+ if (It->capturesThis())
+ return "/*'this' capture*/";
+
+ llvm_unreachable("No other capture type is expected!");
+ }
+
+ return Field->getName();
+}
+
+void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
+ auto Chk = Mgr.registerChecker<UninitializedObjectChecker>();
+
+ AnalyzerOptions &AnOpts = Mgr.getAnalyzerOptions();
+ UninitObjCheckerOptions &ChOpts = Chk->Opts;
+
+ ChOpts.IsPedantic =
+ AnOpts.getCheckerBooleanOption("Pedantic", /*DefaultVal*/ false, Chk);
+ ChOpts.ShouldConvertNotesToWarnings =
+ AnOpts.getCheckerBooleanOption("NotesAsWarnings", /*DefaultVal*/ false, Chk);
+ ChOpts.CheckPointeeInitialization = AnOpts.getCheckerBooleanOption(
+ "CheckPointeeInitialization", /*DefaultVal*/ false, Chk);
+ ChOpts.IgnoredRecordsWithFieldPattern =
+ AnOpts.getCheckerStringOption("IgnoreRecordsWithField",
+ /*DefaultVal*/ "", Chk);
+}
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
new file mode 100644
index 000000000000..aead59c7bf87
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -0,0 +1,282 @@
+//===----- UninitializedPointee.cpp ------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions and methods for handling pointers and references
+// to reduce the size and complexity of UninitializedObjectChecker.cpp.
+//
+// To read about command line options and documentation about how the checker
+// works, refer to UninitializedObjectChecker.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "UninitializedObject.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeMap.h"
+
+using namespace clang;
+using namespace clang::ento;
+
+namespace {
+
+/// Represents a pointer or a reference field.
+class LocField final : public FieldNode {
+  /// We'll store whether the pointee or the pointer itself is uninitialized.
+ const bool IsDereferenced;
+
+public:
+ LocField(const FieldRegion *FR, const bool IsDereferenced = true)
+ : FieldNode(FR), IsDereferenced(IsDereferenced) {}
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ if (IsDereferenced)
+ Out << "uninitialized pointee ";
+ else
+ Out << "uninitialized pointer ";
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << getVariableName(getDecl());
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ if (getDecl()->getType()->isPointerType())
+ Out << "->";
+ else
+ Out << '.';
+ }
+};
+
+/// Represents a nonloc::LocAsInteger or void* field that points to an object,
+/// but needs to be cast back to its dynamic type for a correct note message.
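+///
+/// For instance (an illustrative sketch, not a case from the test suite), for
+/// a field 'void *vptr' whose dynamic type is 'A *' and whose pointee contains
+/// an uninitialized field 'x', the emitted note reads roughly:
+///
+///   uninitialized pointee 'static_cast<A *>(this->vptr)->x'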
+class NeedsCastLocField final : public FieldNode {
+ QualType CastBackType;
+
+public:
+ NeedsCastLocField(const FieldRegion *FR, const QualType &T)
+ : FieldNode(FR), CastBackType(T) {}
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ Out << "uninitialized pointee ";
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {
+ // If this object is a nonloc::LocAsInteger.
+ if (getDecl()->getType()->isIntegerType())
+ Out << "reinterpret_cast";
+    // If this pointer's dynamic type is different from its static type.
+ else
+ Out << "static_cast";
+ Out << '<' << CastBackType.getAsString() << ">(";
+ }
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << getVariableName(getDecl()) << ')';
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ Out << "->";
+ }
+};
+
+/// Represents a Loc field that points to itself.
+class CyclicLocField final : public FieldNode {
+
+public:
+ CyclicLocField(const FieldRegion *FR) : FieldNode(FR) {}
+
+ virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ Out << "object references itself ";
+ }
+
+ virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+
+ virtual void printNode(llvm::raw_ostream &Out) const override {
+ Out << getVariableName(getDecl());
+ }
+
+ virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ llvm_unreachable("CyclicLocField objects must be the last node of the "
+ "fieldchain!");
+ }
+};
+
+} // end of anonymous namespace
+
+// Utility function declarations.
+
+struct DereferenceInfo {
+ const TypedValueRegion *R;
+ const bool NeedsCastBack;
+ const bool IsCyclic;
+ DereferenceInfo(const TypedValueRegion *R, bool NCB, bool IC)
+ : R(R), NeedsCastBack(NCB), IsCyclic(IC) {}
+};
+
+/// Dereferences \p FR and returns the pointee's region, along with whether it
+/// needs to be cast back to its location type. If dereferencing fails for any
+/// reason, returns None.
+static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
+ const FieldRegion *FR);
+
+/// Returns whether \p T can be (transitively) dereferenced to a void pointer
+/// type (void*, void**, ...).
+static bool isVoidPointer(QualType T);
+
+//===----------------------------------------------------------------------===//
+// Methods for FindUninitializedFields.
+//===----------------------------------------------------------------------===//
+
+bool FindUninitializedFields::isDereferencableUninit(
+ const FieldRegion *FR, FieldChainInfo LocalChain) {
+
+ SVal V = State->getSVal(FR);
+
+ assert((isDereferencableType(FR->getDecl()->getType()) ||
+ V.getAs<nonloc::LocAsInteger>()) &&
+ "This method only checks dereferenceable objects!");
+
+ if (V.isUnknown() || V.getAs<loc::ConcreteInt>()) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ if (V.isUndef()) {
+ return addFieldToUninits(
+ LocalChain.add(LocField(FR, /*IsDereferenced*/ false)), FR);
+ }
+
+ if (!Opts.CheckPointeeInitialization) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ // At this point the pointer itself is initialized and points to a valid
+  // location; we'll now check the pointee.
+ llvm::Optional<DereferenceInfo> DerefInfo = dereference(State, FR);
+ if (!DerefInfo) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ if (DerefInfo->IsCyclic)
+ return addFieldToUninits(LocalChain.add(CyclicLocField(FR)), FR);
+
+ const TypedValueRegion *R = DerefInfo->R;
+ const bool NeedsCastBack = DerefInfo->NeedsCastBack;
+
+ QualType DynT = R->getLocationType();
+ QualType PointeeT = DynT->getPointeeType();
+
+ if (PointeeT->isStructureOrClassType()) {
+ if (NeedsCastBack)
+ return isNonUnionUninit(R, LocalChain.add(NeedsCastLocField(FR, DynT)));
+ return isNonUnionUninit(R, LocalChain.add(LocField(FR)));
+ }
+
+ if (PointeeT->isUnionType()) {
+ if (isUnionUninit(R)) {
+ if (NeedsCastBack)
+ return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)),
+ R);
+ return addFieldToUninits(LocalChain.add(LocField(FR)), R);
+ } else {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+ }
+
+ if (PointeeT->isArrayType()) {
+ IsAnyFieldInitialized = true;
+ return false;
+ }
+
+ assert((isPrimitiveType(PointeeT) || isDereferencableType(PointeeT)) &&
+ "At this point FR must either have a primitive dynamic type, or it "
+ "must be a null, undefined, unknown or concrete pointer!");
+
+ SVal PointeeV = State->getSVal(R);
+
+ if (isPrimitiveUninit(PointeeV)) {
+ if (NeedsCastBack)
+ return addFieldToUninits(LocalChain.add(NeedsCastLocField(FR, DynT)), R);
+ return addFieldToUninits(LocalChain.add(LocField(FR)), R);
+ }
+
+ IsAnyFieldInitialized = true;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
+ const FieldRegion *FR) {
+
+ llvm::SmallSet<const TypedValueRegion *, 5> VisitedRegions;
+
+ SVal V = State->getSVal(FR);
+ assert(V.getAsRegion() && "V must have an underlying region!");
+
+ // If the static type of the field is a void pointer, or it is a
+ // nonloc::LocAsInteger, we need to cast it back to the dynamic type before
+ // dereferencing.
+ bool NeedsCastBack = isVoidPointer(FR->getDecl()->getType()) ||
+ V.getAs<nonloc::LocAsInteger>();
+
+ // The region we'd like to acquire.
+ const auto *R = V.getAsRegion()->getAs<TypedValueRegion>();
+ if (!R)
+ return None;
+
+ VisitedRegions.insert(R);
+
+  // We acquire the dynamic type of R.
+ QualType DynT = R->getLocationType();
+
+ while (const MemRegion *Tmp = State->getSVal(R, DynT).getAsRegion()) {
+
+ R = Tmp->getAs<TypedValueRegion>();
+ if (!R)
+ return None;
+
+ // We found a cyclic pointer, like int *ptr = (int *)&ptr.
+ if (!VisitedRegions.insert(R).second)
+ return DereferenceInfo{R, NeedsCastBack, /*IsCyclic*/ true};
+
+ DynT = R->getLocationType();
+ // In order to ensure that this loop terminates, we're also checking the
+ // dynamic type of R, since type hierarchy is finite.
+ if (isDereferencableType(DynT->getPointeeType()))
+ break;
+ }
+
+ while (R->getAs<CXXBaseObjectRegion>()) {
+ NeedsCastBack = true;
+
+ if (!isa<TypedValueRegion>(R->getSuperRegion()))
+ break;
+ R = R->getSuperRegion()->getAs<TypedValueRegion>();
+ }
+
+ return DereferenceInfo{R, NeedsCastBack, /*IsCyclic*/ false};
+}
+
+static bool isVoidPointer(QualType T) {
+ while (!T.isNull()) {
+ if (T->isVoidPointerType())
+ return true;
+ T = T->getPointeeType();
+ }
+ return false;
+}
diff --git a/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp b/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp
deleted file mode 100644
index 398228a9d887..000000000000
--- a/lib/StaticAnalyzer/Checkers/UninitializedObjectChecker.cpp
+++ /dev/null
@@ -1,688 +0,0 @@
-//===----- UninitializedObjectChecker.cpp ------------------------*- C++ -*-==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a checker that reports uninitialized fields in objects
-// created after a constructor call.
-//
-// This checker has two options:
-// - "Pedantic" (boolean). If its not set or is set to false, the checker
-// won't emit warnings for objects that don't have at least one initialized
-// field. This may be set with
-//
-// `-analyzer-config alpha.cplusplus.UninitializedObject:Pedantic=true`.
-//
-// - "NotesAsWarnings" (boolean). If set to true, the checker will emit a
-// warning for each uninitalized field, as opposed to emitting one warning
-// per constructor call, and listing the uninitialized fields that belongs
-// to it in notes. Defaults to false.
-//
-// `-analyzer-config alpha.cplusplus.UninitializedObject:NotesAsWarnings=true`.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ClangSACheckers.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include <algorithm>
-
-using namespace clang;
-using namespace clang::ento;
-
-namespace {
-
-class UninitializedObjectChecker : public Checker<check::EndFunction> {
- std::unique_ptr<BuiltinBug> BT_uninitField;
-
-public:
- // These fields will be initialized when registering the checker.
- bool IsPedantic;
- bool ShouldConvertNotesToWarnings;
-
- UninitializedObjectChecker()
- : BT_uninitField(new BuiltinBug(this, "Uninitialized fields")) {}
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
-};
-
-/// Represents a field chain. A field chain is a vector of fields where the
-/// first element of the chain is the object under checking (not stored), and
-/// every other element is a field, and the element that precedes it is the
-/// object that contains it.
-///
-/// Note that this class is immutable, and new fields may only be added through
-/// constructor calls.
-class FieldChainInfo {
- using FieldChain = llvm::ImmutableList<const FieldRegion *>;
-
- FieldChain Chain;
-
- const bool IsDereferenced = false;
-
-public:
- FieldChainInfo() = default;
-
- FieldChainInfo(const FieldChainInfo &Other, const bool IsDereferenced)
- : Chain(Other.Chain), IsDereferenced(IsDereferenced) {}
-
- FieldChainInfo(const FieldChainInfo &Other, const FieldRegion *FR,
- const bool IsDereferenced = false);
-
- bool contains(const FieldRegion *FR) const { return Chain.contains(FR); }
- bool isPointer() const;
-
- /// If this is a fieldchain whose last element is an uninitialized region of a
- /// pointer type, `IsDereferenced` will store whether the pointer itself or
- /// the pointee is uninitialized.
- bool isDereferenced() const;
- const FieldDecl *getEndOfChain() const;
- void print(llvm::raw_ostream &Out) const;
-
-private:
- /// Prints every element except the last to `Out`. Since ImmutableLists store
- /// elements in reverse order, and have no reverse iterators, we use a
- /// recursive function to print the fieldchain correctly. The last element in
- /// the chain is to be printed by `print`.
- static void printTail(llvm::raw_ostream &Out,
- const llvm::ImmutableListImpl<const FieldRegion *> *L);
- friend struct FieldChainInfoComparator;
-};
-
-struct FieldChainInfoComparator {
- bool operator()(const FieldChainInfo &lhs, const FieldChainInfo &rhs) const {
- assert(!lhs.Chain.isEmpty() && !rhs.Chain.isEmpty() &&
- "Attempted to store an empty fieldchain!");
- return *lhs.Chain.begin() < *rhs.Chain.begin();
- }
-};
-
-using UninitFieldSet = std::set<FieldChainInfo, FieldChainInfoComparator>;
-
-/// Searches for and stores uninitialized fields in a non-union object.
-class FindUninitializedFields {
- ProgramStateRef State;
- const TypedValueRegion *const ObjectR;
-
- const bool IsPedantic;
- bool IsAnyFieldInitialized = false;
-
- UninitFieldSet UninitFields;
-
-public:
- FindUninitializedFields(ProgramStateRef State,
- const TypedValueRegion *const R, bool IsPedantic);
- const UninitFieldSet &getUninitFields();
-
-private:
- /// Adds a FieldChainInfo object to UninitFields. Return true if an insertion
- /// took place.
- bool addFieldToUninits(FieldChainInfo LocalChain);
-
- // For the purposes of this checker, we'll regard the object under checking as
- // a directed tree, where
- // * the root is the object under checking
- // * every node is an object that is
- // - a union
- // - a non-union record
- // - a pointer/reference
- // - an array
- // - of a primitive type, which we'll define later in a helper function.
- // * the parent of each node is the object that contains it
- // * every leaf is an array, a primitive object, a nullptr or an undefined
- // pointer.
- //
- // Example:
- //
- // struct A {
- // struct B {
- // int x, y = 0;
- // };
- // B b;
- // int *iptr = new int;
- // B* bptr;
- //
- // A() {}
- // };
- //
- // The directed tree:
- //
- // ->x
- // /
- // ->b--->y
- // /
- // A-->iptr->(int value)
- // \
- // ->bptr
- //
- // From this we'll construct a vector of fieldchains, where each fieldchain
- // represents an uninitialized field. An uninitialized field may be a
- // primitive object, a pointer, a pointee or a union without a single
- // initialized field.
- // In the above example, for the default constructor call we'll end up with
- // these fieldchains:
- //
- // this->b.x
- // this->iptr (pointee uninit)
- // this->bptr (pointer uninit)
- //
- // We'll traverse each node of the above graph with the appropiate one of
- // these methods:
-
- /// This method checks a region of a union object, and returns true if no
- /// field is initialized within the region.
- bool isUnionUninit(const TypedValueRegion *R);
-
- /// This method checks a region of a non-union object, and returns true if
- /// an uninitialized field is found within the region.
- bool isNonUnionUninit(const TypedValueRegion *R, FieldChainInfo LocalChain);
-
- /// This method checks a region of a pointer or reference object, and returns
- /// true if the ptr/ref object itself or any field within the pointee's region
- /// is uninitialized.
- bool isPointerOrReferenceUninit(const FieldRegion *FR,
- FieldChainInfo LocalChain);
-
- /// This method returns true if the value of a primitive object is
- /// uninitialized.
- bool isPrimitiveUninit(const SVal &V);
-
- // Note that we don't have a method for arrays -- the elements of an array are
- // often left uninitialized intentionally even when it is of a C++ record
- // type, so we'll assume that an array is always initialized.
- // TODO: Add a support for nonloc::LocAsInteger.
-};
-
-} // end of anonymous namespace
-
-// Static variable instantionations.
-
-static llvm::ImmutableListFactory<const FieldRegion *> Factory;
-
-// Utility function declarations.
-
-/// Returns the object that was constructed by CtorDecl, or None if that isn't
-/// possible.
-static Optional<nonloc::LazyCompoundVal>
-getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context);
-
-/// Checks whether the constructor under checking is called by another
-/// constructor.
-static bool isCalledByConstructor(const CheckerContext &Context);
-
-/// Returns whether FD can be (transitively) dereferenced to a void pointer type
-/// (void*, void**, ...). The type of the region behind a void pointer isn't
-/// known, and thus FD can not be analyzed.
-static bool isVoidPointer(const FieldDecl *FD);
-
-/// Returns true if T is a primitive type. We defined this type so that for
-/// objects that we'd only like analyze as much as checking whether their
-/// value is undefined or not, such as ints and doubles, can be analyzed with
-/// ease. This also helps ensuring that every special field type is handled
-/// correctly.
-static bool isPrimitiveType(const QualType &T) {
- return T->isBuiltinType() || T->isEnumeralType() || T->isMemberPointerType();
-}
-
-/// Constructs a note message for a given FieldChainInfo object.
-static void printNoteMessage(llvm::raw_ostream &Out,
- const FieldChainInfo &Chain);
-
-/// Returns with Field's name. This is a helper function to get the correct name
-/// even if Field is a captured lambda variable.
-static StringRef getVariableName(const FieldDecl *Field);
-
-//===----------------------------------------------------------------------===//
-// Methods for UninitializedObjectChecker.
-//===----------------------------------------------------------------------===//
-
-void UninitializedObjectChecker::checkEndFunction(
- const ReturnStmt *RS, CheckerContext &Context) const {
-
- const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
- Context.getLocationContext()->getDecl());
- if (!CtorDecl)
- return;
-
- if (!CtorDecl->isUserProvided())
- return;
-
- if (CtorDecl->getParent()->isUnion())
- return;
-
- // This avoids essentially the same error being reported multiple times.
- if (isCalledByConstructor(Context))
- return;
-
- Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
- if (!Object)
- return;
-
- FindUninitializedFields F(Context.getState(), Object->getRegion(),
- IsPedantic);
-
- const UninitFieldSet &UninitFields = F.getUninitFields();
-
- if (UninitFields.empty())
- return;
-
- // There are uninitialized fields in the record.
-
- ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
- if (!Node)
- return;
-
- PathDiagnosticLocation LocUsedForUniqueing;
- const Stmt *CallSite = Context.getStackFrame()->getCallSite();
- if (CallSite)
- LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
- CallSite, Context.getSourceManager(), Node->getLocationContext());
-
- // For Plist consumers that don't support notes just yet, we'll convert notes
- // to warnings.
- if (ShouldConvertNotesToWarnings) {
- for (const auto &Chain : UninitFields) {
- SmallString<100> WarningBuf;
- llvm::raw_svector_ostream WarningOS(WarningBuf);
-
- printNoteMessage(WarningOS, Chain);
-
- auto Report = llvm::make_unique<BugReport>(
- *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
- Node->getLocationContext()->getDecl());
- Context.emitReport(std::move(Report));
- }
- return;
- }
-
- SmallString<100> WarningBuf;
- llvm::raw_svector_ostream WarningOS(WarningBuf);
- WarningOS << UninitFields.size() << " uninitialized field"
- << (UninitFields.size() == 1 ? "" : "s")
- << " at the end of the constructor call";
-
- auto Report = llvm::make_unique<BugReport>(
- *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
- Node->getLocationContext()->getDecl());
-
- for (const auto &Chain : UninitFields) {
- SmallString<200> NoteBuf;
- llvm::raw_svector_ostream NoteOS(NoteBuf);
-
- printNoteMessage(NoteOS, Chain);
-
- Report->addNote(NoteOS.str(),
- PathDiagnosticLocation::create(Chain.getEndOfChain(),
- Context.getSourceManager()));
- }
- Context.emitReport(std::move(Report));
-}
-
-//===----------------------------------------------------------------------===//
-// Methods for FindUninitializedFields.
-//===----------------------------------------------------------------------===//
-
-FindUninitializedFields::FindUninitializedFields(
- ProgramStateRef State, const TypedValueRegion *const R, bool IsPedantic)
- : State(State), ObjectR(R), IsPedantic(IsPedantic) {}
-
-const UninitFieldSet &FindUninitializedFields::getUninitFields() {
- isNonUnionUninit(ObjectR, FieldChainInfo());
-
- if (!IsPedantic && !IsAnyFieldInitialized)
- UninitFields.clear();
-
- return UninitFields;
-}
-
-bool FindUninitializedFields::addFieldToUninits(FieldChainInfo Chain) {
- if (State->getStateManager().getContext().getSourceManager().isInSystemHeader(
- Chain.getEndOfChain()->getLocation()))
- return false;
-
- return UninitFields.insert(Chain).second;
-}
-
-bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
- FieldChainInfo LocalChain) {
- assert(R->getValueType()->isRecordType() &&
- !R->getValueType()->isUnionType() &&
- "This method only checks non-union record objects!");
-
- const RecordDecl *RD =
- R->getValueType()->getAs<RecordType>()->getDecl()->getDefinition();
- assert(RD && "Referred record has no definition");
-
- bool ContainsUninitField = false;
-
- // Are all of this non-union's fields initialized?
- for (const FieldDecl *I : RD->fields()) {
-
- const auto FieldVal =
- State->getLValue(I, loc::MemRegionVal(R)).castAs<loc::MemRegionVal>();
- const auto *FR = FieldVal.getRegionAs<FieldRegion>();
- QualType T = I->getType();
-
- // If LocalChain already contains FR, then we encountered a cyclic
- // reference. In this case, region FR is already under checking at an
- // earlier node in the directed tree.
- if (LocalChain.contains(FR))
- return false;
-
- if (T->isStructureOrClassType()) {
- if (isNonUnionUninit(FR, {LocalChain, FR}))
- ContainsUninitField = true;
- continue;
- }
-
- if (T->isUnionType()) {
- if (isUnionUninit(FR)) {
- if (addFieldToUninits({LocalChain, FR}))
- ContainsUninitField = true;
- } else
- IsAnyFieldInitialized = true;
- continue;
- }
-
- if (T->isArrayType()) {
- IsAnyFieldInitialized = true;
- continue;
- }
-
- if (T->isPointerType() || T->isReferenceType()) {
- if (isPointerOrReferenceUninit(FR, LocalChain))
- ContainsUninitField = true;
- continue;
- }
-
- if (isPrimitiveType(T)) {
- SVal V = State->getSVal(FieldVal);
-
- if (isPrimitiveUninit(V)) {
- if (addFieldToUninits({LocalChain, FR}))
- ContainsUninitField = true;
- }
- continue;
- }
-
- llvm_unreachable("All cases are handled!");
- }
-
- // Checking bases.
- // FIXME: As of now, because of `isCalledByConstructor`, objects whose type
- // is a descendant of another type will emit warnings for uninitalized
- // inherited members.
- // This is not the only way to analyze bases of an object -- if we didn't
- // filter them out, and didn't analyze the bases, this checker would run for
- // each base of the object in order of base initailization and in theory would
- // find every uninitalized field. This approach could also make handling
- // diamond inheritances more easily.
- //
- // This rule (that a descendant type's cunstructor is responsible for
- // initializing inherited data members) is not obvious, and should it should
- // be.
- const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
- if (!CXXRD)
- return ContainsUninitField;
-
- for (const CXXBaseSpecifier &BaseSpec : CXXRD->bases()) {
- const auto *BaseRegion = State->getLValue(BaseSpec, R)
- .castAs<loc::MemRegionVal>()
- .getRegionAs<TypedValueRegion>();
-
- if (isNonUnionUninit(BaseRegion, LocalChain))
- ContainsUninitField = true;
- }
-
- return ContainsUninitField;
-}
-
-bool FindUninitializedFields::isUnionUninit(const TypedValueRegion *R) {
- assert(R->getValueType()->isUnionType() &&
- "This method only checks union objects!");
- // TODO: Implement support for union fields.
- return false;
-}
-
-// Note that pointers/references don't contain fields themselves, so in this
-// function we won't add anything to LocalChain.
-bool FindUninitializedFields::isPointerOrReferenceUninit(
- const FieldRegion *FR, FieldChainInfo LocalChain) {
-
- assert((FR->getDecl()->getType()->isPointerType() ||
- FR->getDecl()->getType()->isReferenceType()) &&
- "This method only checks pointer/reference objects!");
-
- SVal V = State->getSVal(FR);
-
- if (V.isUnknown() || V.isZeroConstant()) {
- IsAnyFieldInitialized = true;
- return false;
- }
-
- if (V.isUndef()) {
- return addFieldToUninits({LocalChain, FR});
- }
-
- const FieldDecl *FD = FR->getDecl();
-
- // TODO: The dynamic type of a void pointer may be retrieved with
- // `getDynamicTypeInfo`.
- if (isVoidPointer(FD)) {
- IsAnyFieldInitialized = true;
- return false;
- }
-
- assert(V.getAs<Loc>() && "V should be Loc at this point!");
-
- // At this point the pointer itself is initialized and points to a valid
- // location, we'll now check the pointee.
- SVal DerefdV = State->getSVal(V.castAs<Loc>());
-
- // TODO: Dereferencing should be done according to the dynamic type.
- while (Optional<Loc> L = DerefdV.getAs<Loc>()) {
- DerefdV = State->getSVal(*L);
- }
-
- // If V is a pointer pointing to a record type.
- if (Optional<nonloc::LazyCompoundVal> RecordV =
- DerefdV.getAs<nonloc::LazyCompoundVal>()) {
-
- const TypedValueRegion *R = RecordV->getRegion();
-
- // We can't reason about symbolic regions, assume its initialized.
- // Note that this also avoids a potential infinite recursion, because
- // constructors for list-like classes are checked without being called, and
- // the Static Analyzer will construct a symbolic region for Node *next; or
- // similar code snippets.
- if (R->getSymbolicBase()) {
- IsAnyFieldInitialized = true;
- return false;
- }
-
- const QualType T = R->getValueType();
-
- if (T->isStructureOrClassType())
- return isNonUnionUninit(R, {LocalChain, FR});
-
- if (T->isUnionType()) {
- if (isUnionUninit(R)) {
- return addFieldToUninits({LocalChain, FR, /*IsDereferenced*/ true});
- } else {
- IsAnyFieldInitialized = true;
- return false;
- }
- }
-
- if (T->isArrayType()) {
- IsAnyFieldInitialized = true;
- return false;
- }
-
- llvm_unreachable("All cases are handled!");
- }
-
- // TODO: If possible, it should be asserted that the DerefdV at this point is
- // primitive.
-
- if (isPrimitiveUninit(DerefdV))
- return addFieldToUninits({LocalChain, FR, /*IsDereferenced*/ true});
-
- IsAnyFieldInitialized = true;
- return false;
-}
-
-bool FindUninitializedFields::isPrimitiveUninit(const SVal &V) {
- if (V.isUndef())
- return true;
-
- IsAnyFieldInitialized = true;
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Methods for FieldChainInfo.
-//===----------------------------------------------------------------------===//
-
-FieldChainInfo::FieldChainInfo(const FieldChainInfo &Other,
- const FieldRegion *FR, const bool IsDereferenced)
- : FieldChainInfo(Other, IsDereferenced) {
- assert(!contains(FR) && "Can't add a field that is already a part of the "
- "fieldchain! Is this a cyclic reference?");
- Chain = Factory.add(FR, Other.Chain);
-}
-
-bool FieldChainInfo::isPointer() const {
- assert(!Chain.isEmpty() && "Empty fieldchain!");
- return (*Chain.begin())->getDecl()->getType()->isPointerType();
-}
-
-bool FieldChainInfo::isDereferenced() const {
- assert(isPointer() && "Only pointers may or may not be dereferenced!");
- return IsDereferenced;
-}
-
-const FieldDecl *FieldChainInfo::getEndOfChain() const {
- assert(!Chain.isEmpty() && "Empty fieldchain!");
- return (*Chain.begin())->getDecl();
-}
-
-// TODO: This function constructs an incorrect fieldchain string in the
-// following case:
-//
-// struct Base { int x; };
-// struct D1 : Base {}; struct D2 : Base {};
-//
-// struct MostDerived : D1, D2 {
-// MostDerived() {}
-// }
-//
-// A call to MostDerived::MostDerived() will cause two notes that say
-// "uninitialized field 'this->x'", but we can't refer to 'x' directly,
-// we need an explicit namespace resolution whether the uninit field was
-// 'D1::x' or 'D2::x'.
-void FieldChainInfo::print(llvm::raw_ostream &Out) const {
- if (Chain.isEmpty())
- return;
-
- const llvm::ImmutableListImpl<const FieldRegion *> *L =
- Chain.getInternalPointer();
- printTail(Out, L->getTail());
- Out << getVariableName(L->getHead()->getDecl());
-}
-
-void FieldChainInfo::printTail(
- llvm::raw_ostream &Out,
- const llvm::ImmutableListImpl<const FieldRegion *> *L) {
- if (!L)
- return;
-
- printTail(Out, L->getTail());
- const FieldDecl *Field = L->getHead()->getDecl();
- Out << getVariableName(Field);
- Out << (Field->getType()->isPointerType() ? "->" : ".");
-}
-
-//===----------------------------------------------------------------------===//
-// Utility functions.
-//===----------------------------------------------------------------------===//
-
-static bool isVoidPointer(const FieldDecl *FD) {
- QualType T = FD->getType();
-
- while (!T.isNull()) {
- if (T->isVoidPointerType())
- return true;
- T = T->getPointeeType();
- }
- return false;
-}
-
-static Optional<nonloc::LazyCompoundVal>
-getObjectVal(const CXXConstructorDecl *CtorDecl, CheckerContext &Context) {
-
- Loc ThisLoc = Context.getSValBuilder().getCXXThis(CtorDecl->getParent(),
- Context.getStackFrame());
- // Getting the value for 'this'.
- SVal This = Context.getState()->getSVal(ThisLoc);
-
- // Getting the value for '*this'.
- SVal Object = Context.getState()->getSVal(This.castAs<Loc>());
-
- return Object.getAs<nonloc::LazyCompoundVal>();
-}
-
-// TODO: We should also check that if the constructor was called by another
-// constructor, whether those two are in any relation to one another. In it's
-// current state, this introduces some false negatives.
-static bool isCalledByConstructor(const CheckerContext &Context) {
- const LocationContext *LC = Context.getLocationContext()->getParent();
-
- while (LC) {
- if (isa<CXXConstructorDecl>(LC->getDecl()))
- return true;
-
- LC = LC->getParent();
- }
- return false;
-}
-
-static void printNoteMessage(llvm::raw_ostream &Out,
- const FieldChainInfo &Chain) {
- if (Chain.isPointer()) {
- if (Chain.isDereferenced())
- Out << "uninitialized pointee 'this->";
- else
- Out << "uninitialized pointer 'this->";
- } else
- Out << "uninitialized field 'this->";
- Chain.print(Out);
- Out << "'";
-}
-
-static StringRef getVariableName(const FieldDecl *Field) {
- // If Field is a captured lambda variable, Field->getName() will return with
- // an empty string. We can however acquire it's name from the lambda's
- // captures.
- const auto *CXXParent = dyn_cast<CXXRecordDecl>(Field->getParent());
-
- if (CXXParent && CXXParent->isLambda()) {
- assert(CXXParent->captures_begin());
- auto It = CXXParent->captures_begin() + Field->getFieldIndex();
- return It->getCapturedVar()->getName();
- }
-
- return Field->getName();
-}
-
-void ento::registerUninitializedObjectChecker(CheckerManager &Mgr) {
- auto Chk = Mgr.registerChecker<UninitializedObjectChecker>();
- Chk->IsPedantic = Mgr.getAnalyzerOptions().getBooleanOption(
- "Pedantic", /*DefaultVal*/ false, Chk);
- Chk->ShouldConvertNotesToWarnings = Mgr.getAnalyzerOptions().getBooleanOption(
- "NotesAsWarnings", /*DefaultVal*/ false, Chk);
-}
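
The removed FieldChainInfo::printTail above emits the tail of the chain before its head, so the outermost field is printed first and the innermost last. A minimal, self-contained sketch of that ordering, using a std::vector as an illustrative stand-in for the immutable list (not part of the patch):

#include <string>
#include <vector>

// Chain[0] is the innermost field (the list head); later entries are the
// enclosing fields, mirroring the ImmutableList layout above.
static void printChain(const std::vector<std::string> &Chain, size_t I,
                       std::string &Out) {
  if (I == Chain.size())
    return;
  printChain(Chain, I + 1, Out); // emit the enclosing fields first
  Out += Chain[I];
  if (I != 0)
    Out += '.'; // the original chooses "." or "->" from the field's type
}

// printChain({"x", "b", "a"}, 0, Out) appends "a.b.x" to Out.
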
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index a6b50dc37740..bab0c12704fa 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -314,7 +314,7 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
auto report = llvm::make_unique<BugReport>(*BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, arg, *report);
+ bugreporter::trackExpressionValue(N, arg, *report);
C.emitReport(std::move(report));
return true;
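
This hunk is one instance of the patch-wide rename of bugreporter::trackNullOrUndefValue to bugreporter::trackExpressionValue. A hedged sketch of the call pattern after the rename; BT_example, ArgE, and the message are illustrative names, not from the patch:

static void reportBadArg(CheckerContext &C, const Expr *ArgE,
                         const BugType &BT_example) {
  if (ExplodedNode *N = C.generateErrorNode()) {
    auto R = llvm::make_unique<BugReport>(BT_example,
                                          "Undesirable argument value", N);
    R->addRange(ArgE->getSourceRange());
    // Renamed hook: walk the expression's value back along the error path.
    bugreporter::trackExpressionValue(N, ArgE, *R);
    C.emitReport(std::move(R));
  }
}
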
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index dbd12cc9b65a..16b4d5e925ba 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -13,7 +13,7 @@
// A similar flow-sensitive only check exists in Analysis/ReachableCode.cpp
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
@@ -150,7 +150,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
if (const Stmt *S = getUnreachableStmt(CB)) {
// In macros, 'do {...} while (0)' is often used. Don't warn about the
// condition 0 when it is unreachable.
- if (S->getLocStart().isMacroID())
+ if (S->getBeginLoc().isMacroID())
if (const auto *I = dyn_cast<IntegerLiteral>(S))
if (I->getValue() == 0ULL)
if (const Stmt *Parent = PM->getParent(S))
@@ -232,7 +232,7 @@ bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
if (!pred)
return false;
- // Get the predecessor block's terminator conditon
+ // Get the predecessor block's terminator condition
const Stmt *cond = pred->getTerminatorCondition();
//assert(cond && "CFGBlock's predecessor has a terminator condition");
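
The getLocStart()-to-getBeginLoc() rename also recurs throughout this patch. A one-line sketch of the new spelling, where S is any Stmt pointer:

static bool startsInMacro(const Stmt *S) {
  // Formerly written as S->getLocStart().isMacroID().
  return S->getBeginLoc().isMacroID();
}
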
diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 2584f2011819..e458e0554ee2 100644
--- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -14,7 +14,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -74,7 +74,7 @@ void VLASizeChecker::reportBug(
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addVisitor(std::move(Visitor));
report->addRange(SizeE->getSourceRange());
- bugreporter::trackNullOrUndefValue(N, SizeE, *report);
+ bugreporter::trackExpressionValue(N, SizeE, *report);
C.emitReport(std::move(report));
}
diff --git a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index bd657340fcfb..748b226b7a1e 100644
--- a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -90,7 +90,6 @@ private:
return std::make_shared<PathDiagnosticEventPiece>(L, BR.getDescription(), false);
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
@@ -376,10 +375,10 @@ void ValistChecker::checkVAListEndCall(const CallEvent &Call,
}
std::shared_ptr<PathDiagnosticPiece> ValistChecker::ValistBugVisitor::VisitNode(
- const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ const ExplodedNode *N, BugReporterContext &BRC,
BugReport &) {
ProgramStateRef State = N->getState();
- ProgramStateRef StatePrev = PrevN->getState();
+ ProgramStateRef StatePrev = N->getFirstPred()->getState();
const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
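
Bug reporter visitors in this patch lose the PrevN parameter; the previous state is now fetched from the node itself via getFirstPred(). A minimal sketch of a visitor adapted to the new signature (MyBugVisitor and the note text are illustrative):

std::shared_ptr<PathDiagnosticPiece>
MyBugVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
                        BugReport &) {
  ProgramStateRef State = N->getState();
  ProgramStateRef StatePrev = N->getFirstPred()->getState();
  if (State == StatePrev)
    return nullptr; // nothing of interest happened at this node

  const Stmt *S = PathDiagnosticLocation::getStmt(N);
  if (!S)
    return nullptr;

  PathDiagnosticLocation L(S, BRC.getSourceManager(), N->getLocationContext());
  return std::make_shared<PathDiagnosticEventPiece>(L, "State changed here");
}
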
diff --git a/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index 75aefc0e8384..3ee9f1a07fa2 100644
--- a/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -25,7 +25,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index 5b602468cdd4..567063197405 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -72,7 +72,6 @@ private:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
};
@@ -84,9 +83,8 @@ REGISTER_MAP_WITH_PROGRAMSTATE(CtorDtorMap, const MemRegion *, ObjectState)
std::shared_ptr<PathDiagnosticPiece>
VirtualCallChecker::VirtualBugVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
- BugReport &BR) {
+ BugReport &) {
// We need the last ctor/dtor which call the virtual function.
// The visitor walks the ExplodedGraph backwards.
if (Found)
@@ -282,5 +280,6 @@ void ento::registerVirtualCallChecker(CheckerManager &mgr) {
VirtualCallChecker *checker = mgr.registerChecker<VirtualCallChecker>();
checker->IsPureOnly =
- mgr.getAnalyzerOptions().getBooleanOption("PureOnly", false, checker);
+ mgr.getAnalyzerOptions().getCheckerBooleanOption("PureOnly", false,
+ checker);
}
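
Checker-specific options now go through getCheckerBooleanOption, which derives the option's scope from the checker object passed in. A registration sketch under assumed names (MyChecker and its "Aggressive" flag are hypothetical):

void ento::registerMyChecker(CheckerManager &Mgr) {
  auto *Chk = Mgr.registerChecker<MyChecker>();
  Chk->Aggressive = Mgr.getAnalyzerOptions().getCheckerBooleanOption(
      "Aggressive", /*DefaultVal=*/false, Chk);
}
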
diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index dc0d3ec8493a..7fb1c09ca049 100644
--- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -14,28 +14,33 @@ using namespace ento;
void AnalysisManager::anchor() { }
-AnalysisManager::AnalysisManager(
- ASTContext &ASTCtx, DiagnosticsEngine &diags, const LangOptions &lang,
- const PathDiagnosticConsumers &PDC, StoreManagerCreator storemgr,
- ConstraintManagerCreator constraintmgr, CheckerManager *checkerMgr,
- AnalyzerOptions &Options, CodeInjector *injector)
- : AnaCtxMgr(ASTCtx, Options.UnoptimizedCFG,
- Options.includeImplicitDtorsInCFG(),
- /*AddInitializers=*/true, Options.includeTemporaryDtorsInCFG(),
- Options.includeLifetimeInCFG(),
- // Adding LoopExit elements to the CFG is a requirement for loop
- // unrolling.
- Options.includeLoopExitInCFG() || Options.shouldUnrollLoops(),
- Options.includeScopesInCFG(),
- Options.shouldSynthesizeBodies(),
- Options.shouldConditionalizeStaticInitializers(),
- /*addCXXNewAllocator=*/true,
- Options.includeRichConstructorsInCFG(),
- Options.shouldElideConstructors(),
- injector),
- Ctx(ASTCtx), Diags(diags), LangOpts(lang), PathConsumers(PDC),
- CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
- CheckerMgr(checkerMgr), options(Options) {
+AnalysisManager::AnalysisManager(ASTContext &ASTCtx, DiagnosticsEngine &diags,
+ const PathDiagnosticConsumers &PDC,
+ StoreManagerCreator storemgr,
+ ConstraintManagerCreator constraintmgr,
+ CheckerManager *checkerMgr,
+ AnalyzerOptions &Options,
+ CodeInjector *injector)
+ : AnaCtxMgr(
+ ASTCtx, Options.UnoptimizedCFG,
+ Options.ShouldIncludeImplicitDtorsInCFG,
+ /*AddInitializers=*/true,
+ Options.ShouldIncludeTemporaryDtorsInCFG,
+ Options.ShouldIncludeLifetimeInCFG,
+ // Adding LoopExit elements to the CFG is a requirement for loop
+ // unrolling.
+ Options.ShouldIncludeLoopExitInCFG ||
+ Options.ShouldUnrollLoops,
+ Options.ShouldIncludeScopesInCFG,
+ Options.ShouldSynthesizeBodies,
+ Options.ShouldConditionalizeStaticInitializers,
+ /*addCXXNewAllocator=*/true,
+ Options.ShouldIncludeRichConstructorsInCFG,
+ Options.ShouldElideConstructors, injector),
+ Ctx(ASTCtx), Diags(diags), LangOpts(ASTCtx.getLangOpts()),
+ PathConsumers(PDC), CreateStoreMgr(storemgr),
+ CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr),
+ options(Options) {
AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
}
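
AnalyzerOptions entries that used to be lazily computed accessors are now plain fields, so call sites read them directly, as this constructor does. A small sketch using two fields that appear above (Opts is any AnalyzerOptions reference):

static bool needsLoopExitElements(const AnalyzerOptions &Opts) {
  // LoopExit CFG elements are required whenever loop unrolling is enabled.
  return Opts.ShouldIncludeLoopExitInCFG || Opts.ShouldUnrollLoops;
}
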
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 9b2dc32e0600..0588c2bd3d35 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -34,7 +34,7 @@ std::vector<StringRef>
AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
static const StringRef StaticAnalyzerChecks[] = {
#define GET_CHECKERS
-#define CHECKER(FULLNAME, CLASS, DESCFILE, HELPTEXT, GROUPINDEX, HIDDEN) \
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
FULLNAME,
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
#undef CHECKER
@@ -49,114 +49,71 @@ AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
return Result;
}
-AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
- if (UserMode == UMK_NotSet) {
- StringRef ModeStr =
- Config.insert(std::make_pair("mode", "deep")).first->second;
- UserMode = llvm::StringSwitch<UserModeKind>(ModeStr)
- .Case("shallow", UMK_Shallow)
- .Case("deep", UMK_Deep)
- .Default(UMK_NotSet);
- assert(UserMode != UMK_NotSet && "User mode is invalid.");
- }
- return UserMode;
-}
-
-AnalyzerOptions::ExplorationStrategyKind
-AnalyzerOptions::getExplorationStrategy() {
- if (ExplorationStrategy == ExplorationStrategyKind::NotSet) {
- StringRef StratStr =
- Config
- .insert(std::make_pair("exploration_strategy", "unexplored_first_queue"))
- .first->second;
- ExplorationStrategy =
- llvm::StringSwitch<ExplorationStrategyKind>(StratStr)
- .Case("dfs", ExplorationStrategyKind::DFS)
- .Case("bfs", ExplorationStrategyKind::BFS)
- .Case("unexplored_first",
- ExplorationStrategyKind::UnexploredFirst)
- .Case("unexplored_first_queue",
- ExplorationStrategyKind::UnexploredFirstQueue)
- .Case("bfs_block_dfs_contents",
- ExplorationStrategyKind::BFSBlockDFSContents)
- .Default(ExplorationStrategyKind::NotSet);
- assert(ExplorationStrategy != ExplorationStrategyKind::NotSet &&
- "User mode is invalid.");
- }
- return ExplorationStrategy;
-}
-
-IPAKind AnalyzerOptions::getIPAMode() {
- if (IPAMode == IPAK_NotSet) {
- // Use the User Mode to set the default IPA value.
- // Note, we have to add the string to the Config map for the ConfigDumper
- // checker to function properly.
- const char *DefaultIPA = nullptr;
- UserModeKind HighLevelMode = getUserMode();
- if (HighLevelMode == UMK_Shallow)
- DefaultIPA = "inlining";
- else if (HighLevelMode == UMK_Deep)
- DefaultIPA = "dynamic-bifurcate";
- assert(DefaultIPA);
-
- // Lookup the ipa configuration option, use the default from User Mode.
- StringRef ModeStr =
- Config.insert(std::make_pair("ipa", DefaultIPA)).first->second;
- IPAKind IPAConfig = llvm::StringSwitch<IPAKind>(ModeStr)
- .Case("none", IPAK_None)
- .Case("basic-inlining", IPAK_BasicInlining)
- .Case("inlining", IPAK_Inlining)
- .Case("dynamic", IPAK_DynamicDispatch)
- .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
- .Default(IPAK_NotSet);
- assert(IPAConfig != IPAK_NotSet && "IPA Mode is invalid.");
-
- // Set the member variable.
- IPAMode = IPAConfig;
- }
-
- return IPAMode;
+ExplorationStrategyKind
+AnalyzerOptions::getExplorationStrategy() const {
+ auto K =
+ llvm::StringSwitch<llvm::Optional<ExplorationStrategyKind>>(
+ ExplorationStrategy)
+ .Case("dfs", ExplorationStrategyKind::DFS)
+ .Case("bfs", ExplorationStrategyKind::BFS)
+ .Case("unexplored_first",
+ ExplorationStrategyKind::UnexploredFirst)
+ .Case("unexplored_first_queue",
+ ExplorationStrategyKind::UnexploredFirstQueue)
+ .Case("unexplored_first_location_queue",
+ ExplorationStrategyKind::UnexploredFirstLocationQueue)
+ .Case("bfs_block_dfs_contents",
+ ExplorationStrategyKind::BFSBlockDFSContents)
+ .Default(None);
+ assert(K.hasValue() && "Invalid exploration strategy.");
+ return K.getValue();
+}
+
+IPAKind AnalyzerOptions::getIPAMode() const {
+ auto K = llvm::StringSwitch<llvm::Optional<IPAKind>>(IPAMode)
+ .Case("none", IPAK_None)
+ .Case("basic-inlining", IPAK_BasicInlining)
+ .Case("inlining", IPAK_Inlining)
+ .Case("dynamic", IPAK_DynamicDispatch)
+ .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
+ .Default(None);
+ assert(K.hasValue() && "IPA Mode is invalid.");
+
+ return K.getValue();
}
bool
-AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
+AnalyzerOptions::mayInlineCXXMemberFunction(
+ CXXInlineableMemberKind Param) const {
if (getIPAMode() < IPAK_Inlining)
return false;
- if (!CXXMemberInliningMode) {
- static const char *ModeKey = "c++-inlining";
-
- StringRef ModeStr =
- Config.insert(std::make_pair(ModeKey, "destructors")).first->second;
+ auto K =
+ llvm::StringSwitch<llvm::Optional<CXXInlineableMemberKind>>(
+ CXXMemberInliningMode)
+ .Case("constructors", CIMK_Constructors)
+ .Case("destructors", CIMK_Destructors)
+ .Case("methods", CIMK_MemberFunctions)
+ .Case("none", CIMK_None)
+ .Default(None);
- CXXInlineableMemberKind &MutableMode =
- const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
-
- MutableMode = llvm::StringSwitch<CXXInlineableMemberKind>(ModeStr)
- .Case("constructors", CIMK_Constructors)
- .Case("destructors", CIMK_Destructors)
- .Case("none", CIMK_None)
- .Case("methods", CIMK_MemberFunctions)
- .Default(CXXInlineableMemberKind());
-
- if (!MutableMode) {
- // FIXME: We should emit a warning here about an unknown inlining kind,
- // but the AnalyzerOptions doesn't have access to a diagnostic engine.
- MutableMode = CIMK_None;
- }
- }
+ assert(K.hasValue() && "Invalid c++ member function inlining mode.");
- return CXXMemberInliningMode >= K;
+ return *K >= Param;
}
-static StringRef toString(bool b) { return b ? "true" : "false"; }
-
-StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName,
- StringRef OptionName,
- StringRef Default,
- bool SearchInParents) {
+StringRef AnalyzerOptions::getCheckerStringOption(StringRef OptionName,
+ StringRef DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) const {
+ assert(C);
// Search for a package option if the option for the checker is not specified
// and search in parents is enabled.
+ StringRef CheckerName = C->getTagDescription();
+
+ assert(!CheckerName.empty() &&
+ "Empty checker name! Make sure the checker object (including it's "
+ "bases!) if fully initialized before calling this function!");
ConfigTable::const_iterator E = Config.end();
do {
ConfigTable::const_iterator I =
@@ -165,331 +122,35 @@ StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName,
return StringRef(I->getValue());
size_t Pos = CheckerName.rfind('.');
if (Pos == StringRef::npos)
- return Default;
+ return DefaultVal;
CheckerName = CheckerName.substr(0, Pos);
} while (!CheckerName.empty() && SearchInParents);
- return Default;
+ return DefaultVal;
}
-bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal,
- const CheckerBase *C,
- bool SearchInParents) {
+bool AnalyzerOptions::getCheckerBooleanOption(StringRef Name, bool DefaultVal,
+ const CheckerBase *C,
+ bool SearchInParents) const {
// FIXME: We should emit a warning here if the value is something other than
// "true", "false", or the empty string (meaning the default value),
// but the AnalyzerOptions doesn't have access to a diagnostic engine.
- StringRef Default = toString(DefaultVal);
- StringRef V =
- C ? getCheckerOption(C->getTagDescription(), Name, Default,
- SearchInParents)
- : StringRef(Config.insert(std::make_pair(Name, Default)).first->second);
- return llvm::StringSwitch<bool>(V)
+ assert(C);
+ return llvm::StringSwitch<bool>(
+ getCheckerStringOption(Name, DefaultVal ? "true" : "false", C,
+ SearchInParents))
.Case("true", true)
.Case("false", false)
.Default(DefaultVal);
}
-bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name,
- bool DefaultVal, const CheckerBase *C,
- bool SearchInParents) {
- if (!V.hasValue())
- V = getBooleanOption(Name, DefaultVal, C, SearchInParents);
- return V.getValue();
-}
-
-bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
- return getBooleanOption(IncludeTemporaryDtorsInCFG,
- "cfg-temporary-dtors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeImplicitDtorsInCFG() {
- return getBooleanOption(IncludeImplicitDtorsInCFG,
- "cfg-implicit-dtors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeLifetimeInCFG() {
- return getBooleanOption(IncludeLifetimeInCFG, "cfg-lifetime",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::includeLoopExitInCFG() {
- return getBooleanOption(IncludeLoopExitInCFG, "cfg-loopexit",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::includeRichConstructorsInCFG() {
- return getBooleanOption(IncludeRichConstructorsInCFG,
- "cfg-rich-constructors",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::includeScopesInCFG() {
- return getBooleanOption(IncludeScopesInCFG,
- "cfg-scopes",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
- return getBooleanOption(InlineCXXStandardLibrary,
- "c++-stdlib-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineTemplateFunctions() {
- return getBooleanOption(InlineTemplateFunctions,
- "c++-template-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineCXXAllocator() {
- return getBooleanOption(InlineCXXAllocator,
- "c++-allocator-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineCXXContainerMethods() {
- return getBooleanOption(InlineCXXContainerMethods,
- "c++-container-inlining",
- /*Default=*/false);
-}
-
-bool AnalyzerOptions::mayInlineCXXSharedPtrDtor() {
- return getBooleanOption(InlineCXXSharedPtrDtor,
- "c++-shared_ptr-inlining",
- /*Default=*/false);
-}
-
-bool AnalyzerOptions::mayInlineCXXTemporaryDtors() {
- return getBooleanOption(InlineCXXTemporaryDtors,
- "c++-temp-dtor-inlining",
- /*Default=*/true);
-}
-
-bool AnalyzerOptions::mayInlineObjCMethod() {
- return getBooleanOption(ObjCInliningMode,
- "objc-inlining",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldSuppressNullReturnPaths() {
- return getBooleanOption(SuppressNullReturnPaths,
- "suppress-null-return-paths",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() {
- return getBooleanOption(AvoidSuppressingNullArgumentPaths,
- "avoid-suppressing-null-argument-paths",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldSuppressInlinedDefensiveChecks() {
- return getBooleanOption(SuppressInlinedDefensiveChecks,
- "suppress-inlined-defensive-checks",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
- return getBooleanOption(SuppressFromCXXStandardLibrary,
- "suppress-c++-stdlib",
- /* Default = */ true);
-}
-
-bool AnalyzerOptions::shouldCrosscheckWithZ3() {
- return getBooleanOption(CrosscheckWithZ3,
- "crosscheck-with-z3",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldReportIssuesInMainSourceFile() {
- return getBooleanOption(ReportIssuesInMainSourceFile,
- "report-in-main-source-file",
- /* Default = */ false);
-}
-
-
-bool AnalyzerOptions::shouldWriteStableReportFilename() {
- return getBooleanOption(StableReportFilename,
- "stable-report-filename",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldSerializeStats() {
- return getBooleanOption(SerializeStats,
- "serialize-stats",
- /* Default = */ false);
-}
-
-bool AnalyzerOptions::shouldElideConstructors() {
- return getBooleanOption(ElideConstructors,
- "elide-constructors",
- /* Default = */ true);
-}
-
-int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal,
+int AnalyzerOptions::getCheckerIntegerOption(StringRef Name, int DefaultVal,
const CheckerBase *C,
- bool SearchInParents) {
- SmallString<10> StrBuf;
- llvm::raw_svector_ostream OS(StrBuf);
- OS << DefaultVal;
-
- StringRef V = C ? getCheckerOption(C->getTagDescription(), Name, OS.str(),
- SearchInParents)
- : StringRef(Config.insert(std::make_pair(Name, OS.str()))
- .first->second);
-
- int Res = DefaultVal;
- bool b = V.getAsInteger(10, Res);
- assert(!b && "analyzer-config option should be numeric");
- (void)b;
- return Res;
-}
-
-StringRef AnalyzerOptions::getOptionAsString(StringRef Name,
- StringRef DefaultVal,
- const CheckerBase *C,
- bool SearchInParents) {
- return C ? getCheckerOption(C->getTagDescription(), Name, DefaultVal,
- SearchInParents)
- : StringRef(
- Config.insert(std::make_pair(Name, DefaultVal)).first->second);
-}
-
-unsigned AnalyzerOptions::getAlwaysInlineSize() {
- if (!AlwaysInlineSize.hasValue())
- AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3);
- return AlwaysInlineSize.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxInlinableSize() {
- if (!MaxInlinableSize.hasValue()) {
- int DefaultValue = 0;
- UserModeKind HighLevelMode = getUserMode();
- switch (HighLevelMode) {
- default:
- llvm_unreachable("Invalid mode.");
- case UMK_Shallow:
- DefaultValue = 4;
- break;
- case UMK_Deep:
- DefaultValue = 100;
- break;
- }
-
- MaxInlinableSize = getOptionAsInteger("max-inlinable-size", DefaultValue);
- }
- return MaxInlinableSize.getValue();
-}
-
-unsigned AnalyzerOptions::getGraphTrimInterval() {
- if (!GraphTrimInterval.hasValue())
- GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000);
- return GraphTrimInterval.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxSymbolComplexity() {
- if (!MaxSymbolComplexity.hasValue())
- MaxSymbolComplexity = getOptionAsInteger("max-symbol-complexity", 35);
- return MaxSymbolComplexity.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
- if (!MaxTimesInlineLarge.hasValue())
- MaxTimesInlineLarge = getOptionAsInteger("max-times-inline-large", 32);
- return MaxTimesInlineLarge.getValue();
-}
-
-unsigned AnalyzerOptions::getMinCFGSizeTreatFunctionsAsLarge() {
- if (!MinCFGSizeTreatFunctionsAsLarge.hasValue())
- MinCFGSizeTreatFunctionsAsLarge = getOptionAsInteger(
- "min-cfg-size-treat-functions-as-large", 14);
- return MinCFGSizeTreatFunctionsAsLarge.getValue();
-}
-
-unsigned AnalyzerOptions::getMaxNodesPerTopLevelFunction() {
- if (!MaxNodesPerTopLevelFunction.hasValue()) {
- int DefaultValue = 0;
- UserModeKind HighLevelMode = getUserMode();
- switch (HighLevelMode) {
- default:
- llvm_unreachable("Invalid mode.");
- case UMK_Shallow:
- DefaultValue = 75000;
- break;
- case UMK_Deep:
- DefaultValue = 225000;
- break;
- }
- MaxNodesPerTopLevelFunction = getOptionAsInteger("max-nodes", DefaultValue);
- }
- return MaxNodesPerTopLevelFunction.getValue();
-}
-
-bool AnalyzerOptions::shouldSynthesizeBodies() {
- return getBooleanOption("faux-bodies", true);
-}
-
-bool AnalyzerOptions::shouldPrunePaths() {
- return getBooleanOption("prune-paths", true);
-}
-
-bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
- return getBooleanOption("cfg-conditional-static-initializers", true);
-}
-
-bool AnalyzerOptions::shouldInlineLambdas() {
- if (!InlineLambdas.hasValue())
- InlineLambdas = getBooleanOption("inline-lambdas", /*Default=*/true);
- return InlineLambdas.getValue();
-}
-
-bool AnalyzerOptions::shouldWidenLoops() {
- if (!WidenLoops.hasValue())
- WidenLoops = getBooleanOption("widen-loops", /*Default=*/false);
- return WidenLoops.getValue();
-}
-
-bool AnalyzerOptions::shouldUnrollLoops() {
- if (!UnrollLoops.hasValue())
- UnrollLoops = getBooleanOption("unroll-loops", /*Default=*/false);
- return UnrollLoops.getValue();
-}
-
-bool AnalyzerOptions::shouldDisplayNotesAsEvents() {
- if (!DisplayNotesAsEvents.hasValue())
- DisplayNotesAsEvents =
- getBooleanOption("notes-as-events", /*Default=*/false);
- return DisplayNotesAsEvents.getValue();
-}
-
-bool AnalyzerOptions::shouldAggressivelySimplifyBinaryOperation() {
- if (!AggressiveBinaryOperationSimplification.hasValue())
- AggressiveBinaryOperationSimplification =
- getBooleanOption("aggressive-binary-operation-simplification",
- /*Default=*/false);
- return AggressiveBinaryOperationSimplification.getValue();
-}
-
-StringRef AnalyzerOptions::getCTUDir() {
- if (!CTUDir.hasValue()) {
- CTUDir = getOptionAsString("ctu-dir", "");
- if (!llvm::sys::fs::is_directory(*CTUDir))
- CTUDir = "";
- }
- return CTUDir.getValue();
-}
-
-bool AnalyzerOptions::naiveCTUEnabled() {
- if (!NaiveCTU.hasValue()) {
- NaiveCTU = getBooleanOption("experimental-enable-naive-ctu-analysis",
- /*Default=*/false);
- }
- return NaiveCTU.getValue();
-}
-
-StringRef AnalyzerOptions::getCTUIndexName() {
- if (!CTUIndexName.hasValue())
- CTUIndexName = getOptionAsString("ctu-index-name", "externalFnMap.txt");
- return CTUIndexName.getValue();
+ bool SearchInParents) const {
+ int Ret = DefaultVal;
+ bool HasFailed = getCheckerStringOption(Name, std::to_string(DefaultVal), C,
+ SearchInParents)
+ .getAsInteger(10, Ret);
+ assert(!HasFailed && "analyzer-config option should be numeric");
+ (void)HasFailed;
+ return Ret;
}
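
The rewritten accessors above share one pattern: the raw string stays in AnalyzerOptions and is converted to an enum on demand with a StringSwitch over an Optional, asserting that the spelling is recognized. A generic sketch of that pattern; MyKind and its spellings are illustrative, and llvm/ADT/StringSwitch.h plus llvm/ADT/Optional.h are assumed to be included:

enum class MyKind { Fast, Precise };

static MyKind parseMyKind(llvm::StringRef Spelling) {
  auto K = llvm::StringSwitch<llvm::Optional<MyKind>>(Spelling)
               .Case("fast", MyKind::Fast)
               .Case("precise", MyKind::Precise)
               .Default(llvm::None);
  assert(K.hasValue() && "Invalid option spelling.");
  return K.getValue();
}
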
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index db4c1432ccc3..d8ed6942de81 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -207,7 +207,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
switch (Op) {
default:
- assert(false && "Invalid Opcode.");
+ llvm_unreachable("Invalid Opcode.");
case BO_Mul:
return &getValue( V1 * V2 );
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index f990eb6a058d..fd7f53210490 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -546,7 +546,8 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece &P,
}
}
-static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
+static void CompactMacroExpandedPieces(PathPieces &path,
+ const SourceManager& SM);
std::shared_ptr<PathDiagnosticControlFlowPiece> generateDiagForSwitchOP(
@@ -819,7 +820,7 @@ void generateMinimalDiagForBlockEdge(const ExplodedNode *N, BlockEdge BE,
// and values by tracing interesting calculations backwards through evaluated
// expressions along a path. This is probably overly complicated, but the idea
// is that if an expression computed an "interesting" value, the child
-// expressions are are also likely to be "interesting" as well (which then
+// expressions are also likely to be "interesting" as well (which then
// propagates to the values they in turn compute). This reverse propagation
// is needed to track interesting correlations across function call boundaries,
// where formal arguments bind to actual arguments, etc. This is also needed
@@ -841,7 +842,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
default:
if (!isa<CastExpr>(Ex))
break;
- // Fall through.
+ LLVM_FALLTHROUGH;
case Stmt::BinaryOperatorClass:
case Stmt::UnaryOperatorClass: {
for (const Stmt *SubStmt : Ex->children()) {
@@ -861,8 +862,7 @@ static void reversePropagateIntererstingSymbols(BugReport &R,
static void reversePropagateInterestingSymbols(BugReport &R,
InterestingExprs &IE,
const ProgramState *State,
- const LocationContext *CalleeCtx,
- const LocationContext *CallerCtx)
+ const LocationContext *CalleeCtx)
{
// FIXME: Handle non-CallExpr-based CallEvents.
const StackFrameContext *Callee = CalleeCtx->getStackFrame();
@@ -967,8 +967,7 @@ static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
/// Adds a sanitized control-flow diagnostic edge to a path.
static void addEdgeToPath(PathPieces &path,
PathDiagnosticLocation &PrevLoc,
- PathDiagnosticLocation NewLoc,
- const LocationContext *LC) {
+ PathDiagnosticLocation NewLoc) {
if (!NewLoc.isValid())
return;
@@ -1043,7 +1042,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
// not from declaration.
if (D->hasBody())
addEdgeToPath(PD.getActivePath(), PrevLoc,
- PathDiagnosticLocation::createBegin(D, SM), CalleeLC);
+ PathDiagnosticLocation::createBegin(D, SM));
}
// Did we visit an entire call?
@@ -1108,7 +1107,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
// We are descending into a call (backwards). Construct
// a new call piece to contain the path pieces for that call.
- auto C = PathDiagnosticCallPiece::construct(N, *CE, SM);
+ auto C = PathDiagnosticCallPiece::construct(*CE, SM);
// Record the mapping from call piece to LocationContext.
LCM[&C->path] = CE->getCalleeContext();
@@ -1121,7 +1120,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
N->getLocationContext());
}
// Add the edge to the return site.
- addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn);
PrevLoc.invalidate();
}
@@ -1151,7 +1150,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
if (!isa<ObjCForCollectionStmt>(PS->getStmt())) {
PathDiagnosticLocation L =
PathDiagnosticLocation(PS->getStmt(), SM, PDB.LC);
- addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L);
}
} else if (auto BE = P.getAs<BlockEdge>()) {
@@ -1168,8 +1167,7 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
const LocationContext *CalleeCtx = PDB.LC;
if (CallerCtx != CalleeCtx && AddPathEdges) {
reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
- N->getState().get(),
- CalleeCtx, CallerCtx);
+ N->getState().get(), CalleeCtx);
}
}
@@ -1194,13 +1192,12 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
"of the loop");
p->setPrunable(true);
- addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation());
PD.getActivePath().push_front(std::move(p));
if (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
addEdgeToPath(PD.getActivePath(), PrevLoc,
- PathDiagnosticLocation::createEndBrace(CS, SM),
- PDB.LC);
+ PathDiagnosticLocation::createEndBrace(CS, SM));
}
}
@@ -1236,13 +1233,13 @@ static void generatePathDiagnosticsForNode(const ExplodedNode *N,
auto PE = std::make_shared<PathDiagnosticEventPiece>(L, str);
PE->setPrunable(true);
addEdgeToPath(PD.getActivePath(), PrevLoc,
- PE->getLocation(), PDB.LC);
+ PE->getLocation());
PD.getActivePath().push_front(std::move(PE));
}
} else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) ||
isa<GotoStmt>(Term)) {
PathDiagnosticLocation L(Term, SM, PDB.LC);
- addEdgeToPath(PD.getActivePath(), PrevLoc, L, PDB.LC);
+ addEdgeToPath(PD.getActivePath(), PrevLoc, L);
}
}
}
@@ -1269,7 +1266,7 @@ static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
if (!S)
break;
- if (isa<ExprWithCleanups>(S) ||
+ if (isa<FullExpr>(S) ||
isa<CXXBindTemporaryExpr>(S) ||
isa<SubstNonTypeTemplateParmExpr>(S))
continue;
@@ -1540,8 +1537,7 @@ static Optional<size_t> getLengthOnSingleLine(SourceManager &SM,
/// - if there is an inlined call between the edges instead of a single event.
/// - if the whole statement is large enough that having subexpression arrows
/// might be helpful.
-static void removeContextCycles(PathPieces &Path, SourceManager &SM,
- ParentMap &PM) {
+static void removeContextCycles(PathPieces &Path, SourceManager &SM) {
for (PathPieces::iterator I = Path.begin(), E = Path.end(); I != E; ) {
// Pattern match the current piece and its successor.
const auto *PieceI = dyn_cast<PathDiagnosticControlFlowPiece>(I->get());
@@ -1632,8 +1628,8 @@ static void removePunyEdges(PathPieces &path, SourceManager &SM,
if (isConditionForTerminator(end, endParent))
continue;
- SourceLocation FirstLoc = start->getLocStart();
- SourceLocation SecondLoc = end->getLocStart();
+ SourceLocation FirstLoc = start->getBeginLoc();
+ SourceLocation SecondLoc = end->getBeginLoc();
if (!SM.isWrittenInSameFile(FirstLoc, SecondLoc))
continue;
@@ -1844,7 +1840,7 @@ static bool optimizeEdges(PathPieces &path, SourceManager &SM,
// and aesthetically pleasing.
addContextEdges(path, SM, PM, LC);
// Remove "cyclical" edges that include one or more context edges.
- removeContextCycles(path, SM, PM);
+ removeContextCycles(path, SM);
// Hoist edges originating from branch conditions to branches
// for simple branches.
simplifySimpleBranches(path);
@@ -1881,6 +1877,22 @@ static void dropFunctionEntryEdge(PathPieces &Path, LocationContextMap &LCM,
using VisitorsDiagnosticsTy = llvm::DenseMap<const ExplodedNode *,
std::vector<std::shared_ptr<PathDiagnosticPiece>>>;
+/// Populate executed lines with lines containing at least one diagnostic.
+static void updateExecutedLinesWithDiagnosticPieces(
+ PathDiagnostic &PD) {
+
+ PathPieces path = PD.path.flatten(/*ShouldFlattenMacros=*/true);
+ FilesToLineNumsMap &ExecutedLines = PD.getExecutedLines();
+
+ for (const auto &P : path) {
+ FullSourceLoc Loc = P->getLocation().asLocation().getExpansionLoc();
+ FileID FID = Loc.getFileID();
+ unsigned LineNo = Loc.getLineNumber();
+ assert(FID.isValid());
+ ExecutedLines[FID].insert(LineNo);
+ }
+}
+
/// This function is responsible for generating diagnostic pieces that are
/// *not* provided by bug report visitors.
/// These diagnostics may differ depending on the consumer's settings,
@@ -1946,8 +1958,7 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
continue;
if (AddPathEdges)
- addEdgeToPath(PD->getActivePath(), PrevLoc, Note->getLocation(),
- PDB.LC);
+ addEdgeToPath(PD->getActivePath(), PrevLoc, Note->getLocation());
updateStackPiecesWithMessage(*Note, CallStack);
PD->getActivePath().push_front(Note);
}
@@ -1959,15 +1970,13 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
const StackFrameContext *CalleeLC = PDB.LC->getStackFrame();
const Decl *D = CalleeLC->getDecl();
addEdgeToPath(PD->getActivePath(), PrevLoc,
- PathDiagnosticLocation::createBegin(D, SM), CalleeLC);
+ PathDiagnosticLocation::createBegin(D, SM));
}
- if (!AddPathEdges && GenerateDiagnostics)
- CompactPathDiagnostic(PD->getMutablePieces(), SM);
// Finally, prune the diagnostic path of uninteresting stuff.
if (!PD->path.empty()) {
- if (R->shouldPrunePath() && Opts.shouldPrunePaths()) {
+ if (R->shouldPrunePath() && Opts.ShouldPrunePaths) {
bool stillHasNotes =
removeUnneededCalls(PD->getMutablePieces(), R, LCM);
assert(stillHasNotes);
@@ -1997,6 +2006,10 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
removeRedundantMsgs(PD->getMutablePieces());
removeEdgesToDefaultInitializers(PD->getMutablePieces());
}
+
+ if (GenerateDiagnostics && Opts.ShouldDisplayMacroExpansions)
+ CompactMacroExpandedPieces(PD->getMutablePieces(), SM);
+
return PD;
}
@@ -2007,8 +2020,6 @@ static std::unique_ptr<PathDiagnostic> generatePathDiagnosticForConsumer(
void BugType::anchor() {}
-void BugType::FlushReports(BugReporter &BR) {}
-
void BuiltinBug::anchor() {}
//===----------------------------------------------------------------------===//
@@ -2237,14 +2248,6 @@ void BugReporter::FlushReports() {
if (BugTypes.isEmpty())
return;
- // First flush the warnings for each BugType. This may end up creating new
- // warnings and new BugTypes.
- // FIXME: Only NSErrorChecker needs BugType's FlushReports.
- // Turn NSErrorChecker into a proper checker and remove this.
- SmallVector<const BugType *, 16> bugTypes(BugTypes.begin(), BugTypes.end());
- for (const auto I : bugTypes)
- const_cast<BugType*>(I)->FlushReports(*this);
-
// We need to flush reports in deterministic order to ensure the order
// of the reports is consistent between runs.
for (const auto EQ : EQClassesVector)
@@ -2380,8 +2383,7 @@ TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
}
// Sort the error paths from longest to shortest.
- llvm::sort(ReportNodes.begin(), ReportNodes.end(),
- PriorityCompare<true>(PriorityMap));
+ llvm::sort(ReportNodes, PriorityCompare<true>(PriorityMap));
}
bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
@@ -2437,9 +2439,10 @@ bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
return true;
}
-/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
-/// and collapses PathDiagosticPieces that are expanded by macros.
-static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+/// CompactMacroExpandedPieces - This function postprocesses a PathDiagnostic
+/// object and collapses PathDiagosticPieces that are expanded by macros.
+static void CompactMacroExpandedPieces(PathPieces &path,
+ const SourceManager& SM) {
using MacroStackTy =
std::vector<
std::pair<std::shared_ptr<PathDiagnosticMacroPiece>, SourceLocation>>;
@@ -2455,7 +2458,7 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
// Recursively compact calls.
if (auto *call = dyn_cast<PathDiagnosticCallPiece>(&*piece)) {
- CompactPathDiagnostic(call->path, SM);
+ CompactMacroExpandedPieces(call->path, SM);
}
// Get the location of the PathDiagnosticPiece.
@@ -2569,7 +2572,7 @@ generateVisitorsDiagnostics(BugReport *R, const ExplodedNode *ErrorNode,
}
for (auto &V : visitors) {
- auto P = V->VisitNode(NextNode, Pred, BRC, *R);
+ auto P = V->VisitNode(NextNode, BRC, *R);
if (P)
(*Notes)[NextNode].push_back(std::move(P));
}
@@ -2618,7 +2621,7 @@ std::pair<BugReport*, std::unique_ptr<VisitorsDiagnosticsTy>> findValidReport(
generateVisitorsDiagnostics(R, ErrorNode, BRC);
if (R->isValid()) {
- if (Opts.shouldCrosscheckWithZ3()) {
+ if (Opts.ShouldCrosscheckWithZ3) {
// If crosscheck is enabled, remove all visitors, add the refutation
// visitor and check again
R->clearVisitors();
@@ -2806,16 +2809,15 @@ static bool isInevitablySinking(const ExplodedNode *N) {
DFSWorkList.pop_back();
Visited.insert(Blk);
+ // If at least one path reaches the CFG exit, it means that control is
+ // returned to the caller. For now, say that we are not sure what
+ // happens next. If necessary, this can be improved to analyze
+ // the parent StackFrameContext's call site in a similar manner.
+ if (Blk == &Cfg.getExit())
+ return false;
+
for (const auto &Succ : Blk->succs()) {
if (const CFGBlock *SuccBlk = Succ.getReachableBlock()) {
- if (SuccBlk == &Cfg.getExit()) {
- // If at least one path reaches the CFG exit, it means that control is
- // returned to the caller. For now, say that we are not sure what
- // happens next. If necessary, this can be improved to analyze
- // the parent StackFrameContext's call site in a similar manner.
- return false;
- }
-
if (!isImmediateSinkBlock(SuccBlk) && !Visited.count(SuccBlk)) {
// If the block has reachable child blocks that aren't no-return,
// add them to the worklist.
@@ -2961,7 +2963,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
}
PathPieces &Pieces = PD->getMutablePieces();
- if (getAnalyzerOptions().shouldDisplayNotesAsEvents()) {
+ if (getAnalyzerOptions().ShouldDisplayNotesAsEvents) {
// For path diagnostic consumers that don't support extra notes,
// we may optionally convert those to path notes.
for (auto I = report->getNotes().rbegin(),
@@ -2985,6 +2987,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
for (const auto &i : Meta)
PD->addMeta(i);
+ updateExecutedLinesWithDiagnosticPieces(*PD);
Consumer->HandlePathDiagnostic(std::move(PD));
}
}
@@ -2993,7 +2996,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
/// into \p ExecutedLines.
static void populateExecutedLinesWithFunctionSignature(
const Decl *Signature, SourceManager &SM,
- std::unique_ptr<FilesToLineNumsMap> &ExecutedLines) {
+ FilesToLineNumsMap &ExecutedLines) {
SourceRange SignatureSourceRange;
const Stmt* Body = Signature->getBody();
if (const auto FD = dyn_cast<FunctionDecl>(Signature)) {
@@ -3006,22 +3009,26 @@ static void populateExecutedLinesWithFunctionSignature(
SourceLocation Start = SignatureSourceRange.getBegin();
SourceLocation End = Body ? Body->getSourceRange().getBegin()
: SignatureSourceRange.getEnd();
+ if (!Start.isValid() || !End.isValid())
+ return;
unsigned StartLine = SM.getExpansionLineNumber(Start);
unsigned EndLine = SM.getExpansionLineNumber(End);
FileID FID = SM.getFileID(SM.getExpansionLoc(Start));
for (unsigned Line = StartLine; Line <= EndLine; Line++)
- ExecutedLines->operator[](FID.getHashValue()).insert(Line);
+ ExecutedLines[FID].insert(Line);
}
static void populateExecutedLinesWithStmt(
const Stmt *S, SourceManager &SM,
- std::unique_ptr<FilesToLineNumsMap> &ExecutedLines) {
+ FilesToLineNumsMap &ExecutedLines) {
SourceLocation Loc = S->getSourceRange().getBegin();
+ if (!Loc.isValid())
+ return;
SourceLocation ExpansionLoc = SM.getExpansionLoc(Loc);
FileID FID = SM.getFileID(ExpansionLoc);
unsigned LineNo = SM.getExpansionLineNumber(ExpansionLoc);
- ExecutedLines->operator[](FID.getHashValue()).insert(LineNo);
+ ExecutedLines[FID].insert(LineNo);
}
/// \return all executed lines including function signatures on the path
@@ -3034,13 +3041,13 @@ findExecutedLines(SourceManager &SM, const ExplodedNode *N) {
if (N->getFirstPred() == nullptr) {
// First node: show signature of the entrance point.
const Decl *D = N->getLocationContext()->getDecl();
- populateExecutedLinesWithFunctionSignature(D, SM, ExecutedLines);
+ populateExecutedLinesWithFunctionSignature(D, SM, *ExecutedLines);
} else if (auto CE = N->getLocationAs<CallEnter>()) {
// Inlined function: show signature.
const Decl* D = CE->getCalleeContext()->getDecl();
- populateExecutedLinesWithFunctionSignature(D, SM, ExecutedLines);
+ populateExecutedLinesWithFunctionSignature(D, SM, *ExecutedLines);
} else if (const Stmt *S = PathDiagnosticLocation::getStmt(N)) {
- populateExecutedLinesWithStmt(S, SM, ExecutedLines);
+ populateExecutedLinesWithStmt(S, SM, *ExecutedLines);
// Show extra context for some parent kinds.
const Stmt *P = N->getParentMap().getParent(S);
@@ -3049,12 +3056,12 @@ findExecutedLines(SourceManager &SM, const ExplodedNode *N) {
// return statement is generated, but we do want to show the whole
// return.
if (const auto *RS = dyn_cast_or_null<ReturnStmt>(P)) {
- populateExecutedLinesWithStmt(RS, SM, ExecutedLines);
+ populateExecutedLinesWithStmt(RS, SM, *ExecutedLines);
P = N->getParentMap().getParent(RS);
}
if (P && (isa<SwitchCase>(P) || isa<LabelStmt>(P)))
- populateExecutedLinesWithStmt(P, SM, ExecutedLines);
+ populateExecutedLinesWithStmt(P, SM, *ExecutedLines);
}
N = N->getFirstPred();
@@ -3093,7 +3100,7 @@ BugReporter::generateDiagnosticForConsumerMap(
// report location to the last piece in the main source file.
AnalyzerOptions &Opts = getAnalyzerOptions();
for (auto const &P : *Out)
- if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
+ if (Opts.ShouldReportIssuesInMainSourceFile && !Opts.AnalyzeAll)
P.second->resetDiagnosticLocationToMainFile();
return Out;
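
The executed-lines bookkeeping above now keys FilesToLineNumsMap by FileID rather than by its hash value, and updateExecutedLinesWithDiagnosticPieces adds every line that carries a diagnostic piece. A sketch of the per-location update; markExecuted is an illustrative helper, not part of the patch:

static void markExecuted(FullSourceLoc Loc, FilesToLineNumsMap &ExecutedLines) {
  FullSourceLoc Exp = Loc.getExpansionLoc();
  if (!Exp.isValid())
    return;
  // Key by the FileID itself instead of FID.getHashValue().
  ExecutedLines[Exp.getFileID()].insert(Exp.getLineNumber());
}
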
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index d4d33c1746ce..da94b6eb21e9 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -42,10 +42,10 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
@@ -71,12 +71,6 @@ using namespace ento;
// Utility functions.
//===----------------------------------------------------------------------===//
-bool bugreporter::isDeclRefExprToReference(const Expr *E) {
- if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
- return DRE->getDecl()->getType()->isReferenceType();
- return false;
-}
-
static const Expr *peelOffPointerArithmetic(const BinaryOperator *B) {
if (B->isAdditiveOp() && B->getType()->isPointerType()) {
if (B->getLHS()->getType()->isPointerType()) {
@@ -142,8 +136,8 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
E = AE->getBase();
} else if (const auto *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
- } else if (const auto *EWC = dyn_cast<ExprWithCleanups>(E)) {
- E = EWC->getSubExpr();
+ } else if (const auto *FE = dyn_cast<FullExpr>(E)) {
+ E = FE->getSubExpr();
} else {
// Other arbitrary stuff.
break;
@@ -160,34 +154,19 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return E;
}
-const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
- const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
- if (const auto *BE = dyn_cast<BinaryOperator>(S))
- return BE->getRHS();
- return nullptr;
-}
-
-const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
- const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
- if (const auto *RS = dyn_cast<ReturnStmt>(S))
- return RS->getRetValue();
- return nullptr;
-}
-
//===----------------------------------------------------------------------===//
// Definitions for bug reporter visitors.
//===----------------------------------------------------------------------===//
std::shared_ptr<PathDiagnosticPiece>
-BugReporterVisitor::getEndPath(BugReporterContext &BRC,
- const ExplodedNode *EndPathNode, BugReport &BR) {
+BugReporterVisitor::getEndPath(BugReporterContext &,
+ const ExplodedNode *, BugReport &) {
return nullptr;
}
void
-BugReporterVisitor::finalizeVisitor(BugReporterContext &BRC,
- const ExplodedNode *EndPathNode,
- BugReport &BR) {}
+BugReporterVisitor::finalizeVisitor(BugReporterContext &,
+ const ExplodedNode *, BugReport &) {}
std::shared_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath(
BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
@@ -269,10 +248,14 @@ namespace {
/// pointer dereference outside.
class NoStoreFuncVisitor final : public BugReporterVisitor {
const SubRegion *RegionOfInterest;
+ MemRegionManager &MmrMgr;
const SourceManager &SM;
const PrintingPolicy &PP;
- static constexpr const char *DiagnosticsMsg =
- "Returning without writing to '";
+
+ /// Recursion limit for dereferencing fields when looking for the
+ /// region of interest.
+ /// The limit of two indicates that we will dereference fields only once.
+ static const unsigned DEREFERENCE_LIMIT = 2;
/// Frames writing into \c RegionOfInterest.
/// This visitor generates a note only if a function does not write into
@@ -285,21 +268,22 @@ class NoStoreFuncVisitor final : public BugReporterVisitor {
llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingRegion;
llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingCalculated;
+ using RegionVector = SmallVector<const MemRegion *, 5>;
public:
NoStoreFuncVisitor(const SubRegion *R)
- : RegionOfInterest(R),
- SM(R->getMemRegionManager()->getContext().getSourceManager()),
- PP(R->getMemRegionManager()->getContext().getPrintingPolicy()) {}
+ : RegionOfInterest(R), MmrMgr(*R->getMemRegionManager()),
+ SM(MmrMgr.getContext().getSourceManager()),
+ PP(MmrMgr.getContext().getPrintingPolicy()) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int Tag = 0;
ID.AddPointer(&Tag);
+ ID.AddPointer(RegionOfInterest);
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) override {
+ BugReporterContext &BR,
+ BugReport &) override {
const LocationContext *Ctx = N->getLocationContext();
const StackFrameContext *SCtx = Ctx->getStackFrame();
@@ -307,48 +291,66 @@ public:
auto CallExitLoc = N->getLocationAs<CallExitBegin>();
// No diagnostic if region was modified inside the frame.
- if (!CallExitLoc)
+ if (!CallExitLoc || isRegionOfInterestModifiedInFrame(N))
return nullptr;
CallEventRef<> Call =
- BRC.getStateManager().getCallEventManager().getCaller(SCtx, State);
+ BR.getStateManager().getCallEventManager().getCaller(SCtx, State);
+
+ if (SM.isInSystemHeader(Call->getDecl()->getSourceRange().getBegin()))
+ return nullptr;
// Region of interest corresponds to an IVar, exiting a method
// which could have written into that IVar, but did not.
- if (const auto *MC = dyn_cast<ObjCMethodCall>(Call))
- if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest))
- if (potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
- IvarR->getDecl()) &&
- !isRegionOfInterestModifiedInFrame(N))
- return notModifiedMemberDiagnostics(
- Ctx, *CallExitLoc, Call, MC->getReceiverSVal().getAsRegion());
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(Call)) {
+ if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest)) {
+ const MemRegion *SelfRegion = MC->getReceiverSVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(SelfRegion) &&
+ potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
+ IvarR->getDecl()))
+ return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, SelfRegion,
+ "self", /*FirstIsReferenceType=*/false,
+ 1);
+ }
+ }
if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
const MemRegion *ThisR = CCall->getCXXThisVal().getAsRegion();
if (RegionOfInterest->isSubRegionOf(ThisR)
- && !CCall->getDecl()->isImplicit()
- && !isRegionOfInterestModifiedInFrame(N))
- return notModifiedMemberDiagnostics(Ctx, *CallExitLoc, Call, ThisR);
+ && !CCall->getDecl()->isImplicit())
+ return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, ThisR,
+ "this",
+ /*FirstIsReferenceType=*/false, 1);
+
+ // Do not generate diagnostics for not modified parameters in
+ // constructors.
+ return nullptr;
}
ArrayRef<ParmVarDecl *> parameters = getCallParameters(Call);
for (unsigned I = 0; I < Call->getNumArgs() && I < parameters.size(); ++I) {
const ParmVarDecl *PVD = parameters[I];
SVal S = Call->getArgSVal(I);
- unsigned IndirectionLevel = 1;
+ bool ParamIsReferenceType = PVD->getType()->isReferenceType();
+ std::string ParamName = PVD->getNameAsString();
+
+ int IndirectionLevel = 1;
QualType T = PVD->getType();
while (const MemRegion *R = S.getAsRegion()) {
- if (RegionOfInterest->isSubRegionOf(R)
- && !isPointerToConst(PVD->getType())) {
+ if (RegionOfInterest->isSubRegionOf(R) && !isPointerToConst(T))
+ return notModifiedDiagnostics(Ctx, *CallExitLoc, Call, {}, R,
+ ParamName, ParamIsReferenceType,
+ IndirectionLevel);
- if (isRegionOfInterestModifiedInFrame(N))
- return nullptr;
-
- return notModifiedParameterDiagnostics(
- Ctx, *CallExitLoc, Call, PVD, R, IndirectionLevel);
- }
QualType PT = T->getPointeeType();
if (PT.isNull() || PT->isVoidType()) break;
+
+ if (const RecordDecl *RD = PT->getAsRecordDecl())
+ if (auto P = findRegionOfInterestInRecord(RD, State, R))
+ return notModifiedDiagnostics(
+ Ctx, *CallExitLoc, Call, *P, RegionOfInterest, ParamName,
+ ParamIsReferenceType, IndirectionLevel);
+
S = State->getSVal(R, PT);
T = PT;
IndirectionLevel++;
@@ -359,20 +361,94 @@ public:
}
private:
+ /// Attempts to find the region of interest in a given CXX decl,
+ /// by either following the base classes or fields.
+ /// Dereferences fields up to a given recursion limit.
+ /// Note that \p Vec is passed by value, leading to quadratic copying cost,
+ /// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
+ /// \return A chain of fields leading to the region of interest or None.
+ const Optional<RegionVector>
+ findRegionOfInterestInRecord(const RecordDecl *RD, ProgramStateRef State,
+ const MemRegion *R,
+ const RegionVector &Vec = {},
+ int depth = 0) {
+
+ if (depth == DEREFERENCE_LIMIT) // Limit the recursion depth.
+ return None;
+
+ if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
+ if (!RDX->hasDefinition())
+ return None;
+
+ // Recursively examine the base classes.
+ // Note that following base classes does not increase the recursion depth.
+ if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto II : RDX->bases())
+ if (const RecordDecl *RRD = II.getType()->getAsRecordDecl())
+ if (auto Out = findRegionOfInterestInRecord(RRD, State, R, Vec, depth))
+ return Out;
+
+ for (const FieldDecl *I : RD->fields()) {
+ QualType FT = I->getType();
+ const FieldRegion *FR = MmrMgr.getFieldRegion(I, cast<SubRegion>(R));
+ const SVal V = State->getSVal(FR);
+ const MemRegion *VR = V.getAsRegion();
+
+ RegionVector VecF = Vec;
+ VecF.push_back(FR);
+
+ if (RegionOfInterest == VR)
+ return VecF;
+
+ if (const RecordDecl *RRD = FT->getAsRecordDecl())
+ if (auto Out =
+ findRegionOfInterestInRecord(RRD, State, FR, VecF, depth + 1))
+ return Out;
+
+ QualType PT = FT->getPointeeType();
+ if (PT.isNull() || PT->isVoidType() || !VR) continue;
+
+ if (const RecordDecl *RRD = PT->getAsRecordDecl())
+ if (auto Out =
+ findRegionOfInterestInRecord(RRD, State, VR, VecF, depth + 1))
+ return Out;
+
+ }
+
+ return None;
+ }
/// \return Whether the method declaration \p Parent
/// syntactically has a binary operation writing into the ivar \p Ivar.
bool potentiallyWritesIntoIvar(const Decl *Parent,
const ObjCIvarDecl *Ivar) {
using namespace ast_matchers;
- if (!Parent || !Parent->getBody())
+ const char * IvarBind = "Ivar";
+ if (!Parent || !Parent->hasBody())
return false;
StatementMatcher WriteIntoIvarM = binaryOperator(
- hasOperatorName("="), hasLHS(ignoringParenImpCasts(objcIvarRefExpr(
- hasDeclaration(equalsNode(Ivar))))));
+ hasOperatorName("="),
+ hasLHS(ignoringParenImpCasts(
+ objcIvarRefExpr(hasDeclaration(equalsNode(Ivar))).bind(IvarBind))));
StatementMatcher ParentM = stmt(hasDescendant(WriteIntoIvarM));
auto Matches = match(ParentM, *Parent->getBody(), Parent->getASTContext());
- return !Matches.empty();
+ for (BoundNodes &Match : Matches) {
+ auto IvarRef = Match.getNodeAs<ObjCIvarRefExpr>(IvarBind);
+ if (IvarRef->isFreeIvar())
+ return true;
+
+ const Expr *Base = IvarRef->getBase();
+ if (const auto *ICE = dyn_cast<ImplicitCastExpr>(Base))
+ Base = ICE->getSubExpr();
+
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Base))
+ if (const auto *ID = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
+ if (ID->getParameterKind() == ImplicitParamDecl::ObjCSelf)
+ return true;
+
+ return false;
+ }
+ return false;
}
/// Check and lazily calculate whether the region of interest is
@@ -433,6 +509,8 @@ private:
RuntimeDefinition RD = Call->getRuntimeDefinition();
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(RD.getDecl()))
return FD->parameters();
+ if (const auto *MD = dyn_cast_or_null<ObjCMethodDecl>(RD.getDecl()))
+ return MD->parameters();
return Call->parameters();
}
@@ -443,123 +521,112 @@ private:
Ty->getPointeeType().getCanonicalType().isConstQualified();
}
- /// \return Diagnostics piece for the member field not modified
- /// in a given function.
- std::shared_ptr<PathDiagnosticPiece> notModifiedMemberDiagnostics(
- const LocationContext *Ctx,
- CallExitBegin &CallExitLoc,
- CallEventRef<> Call,
- const MemRegion *ArgRegion) {
- const char *TopRegionName = isa<ObjCMethodCall>(Call) ? "self" : "this";
+ /// \return Diagnostics piece for region not modified in the current function.
+ std::shared_ptr<PathDiagnosticPiece>
+ notModifiedDiagnostics(const LocationContext *Ctx, CallExitBegin &CallExitLoc,
+ CallEventRef<> Call, const RegionVector &FieldChain,
+ const MemRegion *MatchedRegion, StringRef FirstElement,
+ bool FirstIsReferenceType, unsigned IndirectionLevel) {
+
+ PathDiagnosticLocation L;
+ if (const ReturnStmt *RS = CallExitLoc.getReturnStmt()) {
+ L = PathDiagnosticLocation::createBegin(RS, SM, Ctx);
+ } else {
+ L = PathDiagnosticLocation(
+ Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(),
+ SM);
+ }
+
SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
- os << DiagnosticsMsg;
- bool out = prettyPrintRegionName(TopRegionName, "->", /*IsReference=*/true,
- /*IndirectionLevel=*/1, ArgRegion, os, PP);
+ os << "Returning without writing to '";
- // Return nothing if we have failed to pretty-print.
- if (!out)
+ // Do not generate the note if we failed to pretty-print.
+ if (!prettyPrintRegionName(FirstElement, FirstIsReferenceType,
+ MatchedRegion, FieldChain, IndirectionLevel, os))
return nullptr;
os << "'";
- PathDiagnosticLocation L =
- getPathDiagnosticLocation(CallExitLoc.getReturnStmt(), SM, Ctx, Call);
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
- /// \return Diagnostics piece for the parameter \p PVD not modified
- /// in a given function.
- /// \p IndirectionLevel How many times \c ArgRegion has to be dereferenced
- /// before we get to the super region of \c RegionOfInterest
- std::shared_ptr<PathDiagnosticPiece>
- notModifiedParameterDiagnostics(const LocationContext *Ctx,
- CallExitBegin &CallExitLoc,
- CallEventRef<> Call,
- const ParmVarDecl *PVD,
- const MemRegion *ArgRegion,
- unsigned IndirectionLevel) {
- PathDiagnosticLocation L = getPathDiagnosticLocation(
- CallExitLoc.getReturnStmt(), SM, Ctx, Call);
- SmallString<256> sbuf;
- llvm::raw_svector_ostream os(sbuf);
- os << DiagnosticsMsg;
- bool IsReference = PVD->getType()->isReferenceType();
- const char *Sep = IsReference && IndirectionLevel == 1 ? "." : "->";
- bool Success = prettyPrintRegionName(
- PVD->getQualifiedNameAsString().c_str(),
- Sep, IsReference, IndirectionLevel, ArgRegion, os, PP);
-
- // Print the parameter name if the pretty-printing has failed.
- if (!Success)
- PVD->printQualifiedName(os);
- os << "'";
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
- }
+ /// Pretty-print region \p MatchedRegion to \p os.
+ /// \return Whether printing succeeded.
+ bool prettyPrintRegionName(StringRef FirstElement, bool FirstIsReferenceType,
+ const MemRegion *MatchedRegion,
+ const RegionVector &FieldChain,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os) {
- /// \return a path diagnostic location for the optionally
- /// present return statement \p RS.
- PathDiagnosticLocation getPathDiagnosticLocation(const ReturnStmt *RS,
- const SourceManager &SM,
- const LocationContext *Ctx,
- CallEventRef<> Call) {
- if (RS)
- return PathDiagnosticLocation::createBegin(RS, SM, Ctx);
- return PathDiagnosticLocation(
- Call->getRuntimeDefinition().getDecl()->getSourceRange().getEnd(), SM);
- }
+ if (FirstIsReferenceType)
+ IndirectionLevel--;
- /// Pretty-print region \p ArgRegion starting from parent to \p os.
- /// \return whether printing has succeeded
- bool prettyPrintRegionName(StringRef TopRegionName,
- StringRef Sep,
- bool IsReference,
- int IndirectionLevel,
- const MemRegion *ArgRegion,
- llvm::raw_svector_ostream &os,
- const PrintingPolicy &PP) {
- SmallVector<const MemRegion *, 5> Subregions;
+ RegionVector RegionSequence;
+
+ // Add the regions in the reverse order, then reverse the resulting array.
+ assert(RegionOfInterest->isSubRegionOf(MatchedRegion));
const MemRegion *R = RegionOfInterest;
- while (R != ArgRegion) {
- if (!(isa<FieldRegion>(R) || isa<CXXBaseObjectRegion>(R) ||
- isa<ObjCIvarRegion>(R)))
- return false; // Pattern-matching failed.
- Subregions.push_back(R);
+ while (R != MatchedRegion) {
+ RegionSequence.push_back(R);
R = cast<SubRegion>(R)->getSuperRegion();
}
- bool IndirectReference = !Subregions.empty();
+ std::reverse(RegionSequence.begin(), RegionSequence.end());
+ RegionSequence.append(FieldChain.begin(), FieldChain.end());
+
+ StringRef Sep;
+ for (const MemRegion *R : RegionSequence) {
+
+ // Just keep going up to the base region.
+ // Element regions may appear due to casts.
+ if (isa<CXXBaseObjectRegion>(R) || isa<CXXTempObjectRegion>(R))
+ continue;
+
+ if (Sep.empty())
+ Sep = prettyPrintFirstElement(FirstElement,
+ /*MoreItemsExpected=*/true,
+ IndirectionLevel, os);
- if (IndirectReference)
- IndirectionLevel--; // Due to "->" symbol.
+ os << Sep;
- if (IsReference)
- IndirectionLevel--; // Due to reference semantics.
+ // Can only reasonably pretty-print DeclRegions.
+ if (!isa<DeclRegion>(R))
+ return false;
- bool ShouldSurround = IndirectReference && IndirectionLevel > 0;
+ const auto *DR = cast<DeclRegion>(R);
+ Sep = DR->getValueType()->isAnyPointerType() ? "->" : ".";
+ DR->getDecl()->getDeclName().print(os, PP);
+ }
- if (ShouldSurround)
+ if (Sep.empty())
+ prettyPrintFirstElement(FirstElement,
+ /*MoreItemsExpected=*/false, IndirectionLevel,
+ os);
+ return true;
+ }
+
+ /// Print first item in the chain, return new separator.
+ StringRef prettyPrintFirstElement(StringRef FirstElement,
+ bool MoreItemsExpected,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os) {
+ StringRef Out = ".";
+
+ if (IndirectionLevel > 0 && MoreItemsExpected) {
+ IndirectionLevel--;
+ Out = "->";
+ }
+
+ if (IndirectionLevel > 0 && MoreItemsExpected)
os << "(";
- for (int i = 0; i < IndirectionLevel; i++)
+
+ for (int i = 0; i < IndirectionLevel; i++)
os << "*";
- os << TopRegionName;
- if (ShouldSurround)
+ os << FirstElement;
+
+ if (IndirectionLevel > 0 && MoreItemsExpected)
os << ")";
- for (auto I = Subregions.rbegin(), E = Subregions.rend(); I != E; ++I) {
- if (const auto *FR = dyn_cast<FieldRegion>(*I)) {
- os << Sep;
- FR->getDecl()->getDeclName().print(os, PP);
- Sep = ".";
- } else if (const auto *IR = dyn_cast<ObjCIvarRegion>(*I)) {
- os << "->";
- IR->getDecl()->getDeclName().print(os, PP);
- Sep = ".";
- } else if (isa<CXXBaseObjectRegion>(*I)) {
- continue; // Just keep going up to the base region.
- } else {
- llvm_unreachable("Previous check has missed an unexpected region");
- }
- }
- return true;
+ return Out;
}
};
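The parenthesization rule implemented by prettyPrintFirstElement (wrap the first element only when it is still dereferenced and more chain items follow) can be exercised in isolation; a standalone sketch with an invented helper name, mirroring the logic but skipping the stream plumbing and the returned separator:

#include <iostream>
#include <string>

std::string printFirstElement(const std::string &Name, bool MoreItemsExpected,
                              int IndirectionLevel) {
  std::string Out;
  if (IndirectionLevel > 0 && MoreItemsExpected)
    --IndirectionLevel;                // the "->" that follows eats one '*'
  bool Wrap = IndirectionLevel > 0 && MoreItemsExpected;
  if (Wrap)
    Out += "(";
  for (int i = 0; i < IndirectionLevel; ++i)
    Out += "*";
  Out += Name;
  if (Wrap)
    Out += ")";
  return Out;
}

int main() {
  std::cout << printFirstElement("p", true, 2) << "->field\n"; // (*p)->field
  std::cout << printFirstElement("p", true, 1) << "->field\n"; // p->field
  std::cout << printFirstElement("p", false, 1) << "\n";       // *p
}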
@@ -579,7 +646,6 @@ public:
ValueAtDereference(V) {}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override {
if (WasModified)
@@ -590,10 +656,10 @@ public:
return nullptr;
const SourceManager &SMgr = BRC.getSourceManager();
- if (auto Loc = matchAssignment(N, BRC)) {
+ if (auto Loc = matchAssignment(N)) {
if (isFunctionMacroExpansion(*Loc, SMgr)) {
std::string MacroName = getMacroName(*Loc, BRC);
- SourceLocation BugLoc = BugPoint->getStmt()->getLocStart();
+ SourceLocation BugLoc = BugPoint->getStmt()->getBeginLoc();
if (!BugLoc.isMacroID() || getMacroName(BugLoc, BRC) != MacroName)
BR.markInvalid(getTag(), MacroName.c_str());
}
@@ -610,8 +676,8 @@ public:
bool EnableNullFPSuppression, BugReport &BR,
const SVal V) {
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
- if (EnableNullFPSuppression && Options.shouldSuppressNullReturnPaths()
- && V.getAs<Loc>())
+ if (EnableNullFPSuppression &&
+ Options.ShouldSuppressNullReturnPaths && V.getAs<Loc>())
BR.addVisitor(llvm::make_unique<MacroNullReturnSuppressionVisitor>(
R->getAs<SubRegion>(), V));
}
@@ -628,8 +694,7 @@ public:
private:
/// \return Source location of right hand side of an assignment
/// into \c RegionOfInterest, empty optional if none found.
- Optional<SourceLocation> matchAssignment(const ExplodedNode *N,
- BugReporterContext &BRC) {
+ Optional<SourceLocation> matchAssignment(const ExplodedNode *N) {
const Stmt *S = PathDiagnosticLocation::getStmt(N);
ProgramStateRef State = N->getState();
auto *LCtx = N->getLocationContext();
@@ -641,12 +706,12 @@ private:
if (const Expr *RHS = VD->getInit())
if (RegionOfInterest->isSubRegionOf(
State->getLValue(VD, LCtx).getAsRegion()))
- return RHS->getLocStart();
+ return RHS->getBeginLoc();
} else if (const auto *BO = dyn_cast<BinaryOperator>(S)) {
const MemRegion *R = N->getSVal(BO->getLHS()).getAsRegion();
const Expr *RHS = BO->getRHS();
if (BO->isAssignmentOp() && RegionOfInterest->isSubRegionOf(R)) {
- return RHS->getLocStart();
+ return RHS->getBeginLoc();
}
}
return None;
@@ -670,10 +735,14 @@ class ReturnVisitor : public BugReporterVisitor {
bool EnableNullFPSuppression;
bool ShouldInvalidate = true;
+ AnalyzerOptions &Options;
public:
- ReturnVisitor(const StackFrameContext *Frame, bool Suppressed)
- : StackFrame(Frame), EnableNullFPSuppression(Suppressed) {}
+ ReturnVisitor(const StackFrameContext *Frame,
+ bool Suppressed,
+ AnalyzerOptions &Options)
+ : StackFrame(Frame), EnableNullFPSuppression(Suppressed),
+ Options(Options) {}
static void *getTag() {
static int Tag = 0;
@@ -701,10 +770,10 @@ public:
// First, find when we processed the statement.
do {
- if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
+ if (auto CEE = Node->getLocationAs<CallExitEnd>())
if (CEE->getCalleeContext()->getCallSite() == S)
break;
- if (Optional<StmtPoint> SP = Node->getLocationAs<StmtPoint>())
+ if (auto SP = Node->getLocationAs<StmtPoint>())
if (SP->getStmt() == S)
break;
@@ -739,23 +808,19 @@ public:
AnalyzerOptions &Options = State->getAnalysisManager().options;
bool EnableNullFPSuppression = false;
- if (InEnableNullFPSuppression && Options.shouldSuppressNullReturnPaths())
+ if (InEnableNullFPSuppression &&
+ Options.ShouldSuppressNullReturnPaths)
if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
BR.markInteresting(CalleeContext);
BR.addVisitor(llvm::make_unique<ReturnVisitor>(CalleeContext,
- EnableNullFPSuppression));
- }
-
- /// Returns true if any counter-suppression heuristics are enabled for
- /// ReturnVisitor.
- static bool hasCounterSuppression(AnalyzerOptions &Options) {
- return Options.shouldAvoidSuppressingNullArgumentPaths();
+ EnableNullFPSuppression,
+ Options));
}
std::shared_ptr<PathDiagnosticPiece>
- visitNodeInitial(const ExplodedNode *N, const ExplodedNode *PrevN,
+ visitNodeInitial(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
// Only print a message at the interesting return statement.
if (N->getLocationContext() != StackFrame)
@@ -799,37 +864,40 @@ public:
RetE = RetE->IgnoreParenCasts();
- // If we can't prove the return value is 0, just mark it interesting, and
- // make sure to track it into any further inner functions.
- if (!State->isNull(V).isConstrainedTrue()) {
- BR.markInteresting(V);
- ReturnVisitor::addVisitorIfNecessary(N, RetE, BR,
- EnableNullFPSuppression);
- return nullptr;
- }
-
// If we're returning 0, we should track where that 0 came from.
- bugreporter::trackNullOrUndefValue(N, RetE, BR, /*IsArg*/ false,
- EnableNullFPSuppression);
+ bugreporter::trackExpressionValue(N, RetE, BR, EnableNullFPSuppression);
// Build an appropriate message based on the return value.
SmallString<64> Msg;
llvm::raw_svector_ostream Out(Msg);
- if (V.getAs<Loc>()) {
- // If we have counter-suppression enabled, make sure we keep visiting
- // future nodes. We want to emit a path note as well, in case
- // the report is resurrected as valid later on.
- AnalyzerOptions &Options = BRC.getAnalyzerOptions();
- if (EnableNullFPSuppression && hasCounterSuppression(Options))
- Mode = MaybeUnsuppress;
+ if (State->isNull(V).isConstrainedTrue()) {
+ if (V.getAs<Loc>()) {
+
+ // If we have counter-suppression enabled, make sure we keep visiting
+ // future nodes. We want to emit a path note as well, in case
+ // the report is resurrected as valid later on.
+ if (EnableNullFPSuppression &&
+ Options.ShouldAvoidSuppressingNullArgumentPaths)
+ Mode = MaybeUnsuppress;
+
+ if (RetE->getType()->isObjCObjectPointerType()) {
+ Out << "Returning nil";
+ } else {
+ Out << "Returning null pointer";
+ }
+ } else {
+ Out << "Returning zero";
+ }
- if (RetE->getType()->isObjCObjectPointerType())
- Out << "Returning nil";
- else
- Out << "Returning null pointer";
} else {
- Out << "Returning zero";
+ if (auto CI = V.getAs<nonloc::ConcreteInt>()) {
+ Out << "Returning the value " << CI->getValue();
+ } else if (V.getAs<Loc>()) {
+ Out << "Returning pointer";
+ } else {
+ Out << "Returning value";
+ }
}
if (LValue) {
@@ -855,11 +923,10 @@ public:
}
std::shared_ptr<PathDiagnosticPiece>
- visitNodeMaybeUnsuppress(const ExplodedNode *N, const ExplodedNode *PrevN,
+ visitNodeMaybeUnsuppress(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
#ifndef NDEBUG
- AnalyzerOptions &Options = BRC.getAnalyzerOptions();
- assert(hasCounterSuppression(Options));
+ assert(Options.ShouldAvoidSuppressingNullArgumentPaths);
#endif
// Are we at the entry node for this call?
@@ -893,8 +960,7 @@ public:
if (!State->isNull(*ArgV).isConstrainedTrue())
continue;
- if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true,
- EnableNullFPSuppression))
+ if (bugreporter::trackExpressionValue(N, ArgE, BR, EnableNullFPSuppression))
ShouldInvalidate = false;
// If we /can't/ track the null pointer, we should err on the side of
@@ -906,14 +972,13 @@ public:
}
std::shared_ptr<PathDiagnosticPiece> VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override {
switch (Mode) {
case Initial:
- return visitNodeInitial(N, PrevN, BRC, BR);
+ return visitNodeInitial(N, BRC, BR);
case MaybeUnsuppress:
- return visitNodeMaybeUnsuppress(N, PrevN, BRC, BR);
+ return visitNodeMaybeUnsuppress(N, BRC, BR);
case Satisfied:
return nullptr;
}
@@ -921,7 +986,7 @@ public:
llvm_unreachable("Invalid visit mode!");
}
- void finalizeVisitor(BugReporterContext &BRC, const ExplodedNode *N,
+ void finalizeVisitor(BugReporterContext &, const ExplodedNode *,
BugReport &BR) override {
if (EnableNullFPSuppression && ShouldInvalidate)
BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
@@ -1087,12 +1152,12 @@ static void showBRDefaultDiagnostics(llvm::raw_svector_ostream& os,
std::shared_ptr<PathDiagnosticPiece>
FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC, BugReport &BR) {
if (Satisfied)
return nullptr;
const ExplodedNode *StoreSite = nullptr;
+ const ExplodedNode *Pred = Succ->getFirstPred();
const Expr *InitE = nullptr;
bool IsParam = false;
@@ -1173,12 +1238,11 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
if (!IsParam)
InitE = InitE->IgnoreParenCasts();
- bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam,
- EnableNullFPSuppression);
- } else {
- ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
- BR, EnableNullFPSuppression);
+ bugreporter::trackExpressionValue(StoreSite, InitE, BR,
+ EnableNullFPSuppression);
}
+ ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
+ BR, EnableNullFPSuppression);
}
// Okay, we've found the binding. Emit an appropriate message.
@@ -1204,8 +1268,7 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (const auto *BDR =
dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
- if (Optional<KnownSVal> KV =
- State->getSVal(OriginalR).getAs<KnownSVal>())
+ if (auto KV = State->getSVal(OriginalR).getAs<KnownSVal>())
BR.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
*KV, OriginalR, EnableNullFPSuppression));
}
@@ -1260,8 +1323,8 @@ bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
std::shared_ptr<PathDiagnosticPiece>
TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC, BugReport &BR) {
+ BugReporterContext &BRC, BugReport &) {
+ const ExplodedNode *PrevN = N->getFirstPred();
if (IsSatisfied)
return nullptr;
@@ -1316,7 +1379,7 @@ SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
: V(Value) {
// Check if the visitor is disabled.
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
- if (!Options.shouldSuppressInlinedDefensiveChecks())
+ if (!Options.ShouldSuppressInlinedDefensiveChecks)
IsSatisfied = true;
assert(N->getState()->isNull(V).isConstrainedTrue() &&
@@ -1336,9 +1399,9 @@ const char *SuppressInlineDefensiveChecksVisitor::getTag() {
std::shared_ptr<PathDiagnosticPiece>
SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) {
+ const ExplodedNode *Pred = Succ->getFirstPred();
if (IsSatisfied)
return nullptr;
@@ -1379,7 +1442,7 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
CurTerminatorStmt = BE->getSrc()->getTerminator().getStmt();
} else if (auto SP = CurPoint.getAs<StmtPoint>()) {
const Stmt *CurStmt = SP->getStmt();
- if (!CurStmt->getLocStart().isMacroID())
+ if (!CurStmt->getBeginLoc().isMacroID())
return nullptr;
CFGStmtMap *Map = CurLC->getAnalysisDeclContext()->getCFGStmtMap();
@@ -1391,9 +1454,9 @@ SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
if (!CurTerminatorStmt)
return nullptr;
- SourceLocation TerminatorLoc = CurTerminatorStmt->getLocStart();
+ SourceLocation TerminatorLoc = CurTerminatorStmt->getBeginLoc();
if (TerminatorLoc.isMacroID()) {
- SourceLocation BugLoc = BugPoint->getStmt()->getLocStart();
+ SourceLocation BugLoc = BugPoint->getStmt()->getBeginLoc();
// Suppress reports unless we are in that same macro.
if (!BugLoc.isMacroID() ||
@@ -1427,11 +1490,13 @@ static const MemRegion *getLocationRegionIfReference(const Expr *E,
return nullptr;
}
+/// \return A subexpression of {@code Ex} which represents the
+/// expression-of-interest.
static const Expr *peelOffOuterExpr(const Expr *Ex,
const ExplodedNode *N) {
Ex = Ex->IgnoreParenCasts();
- if (const auto *EWC = dyn_cast<ExprWithCleanups>(Ex))
- return peelOffOuterExpr(EWC->getSubExpr(), N);
+ if (const auto *FE = dyn_cast<FullExpr>(Ex))
+ return peelOffOuterExpr(FE->getSubExpr(), N);
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ex))
return peelOffOuterExpr(OVE->getSourceExpr(), N);
if (const auto *POE = dyn_cast<PseudoObjectExpr>(Ex)) {
@@ -1471,121 +1536,72 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
if (const Expr *SubEx = peelOffPointerArithmetic(BO))
return peelOffOuterExpr(SubEx, N);
- return Ex;
-}
+ if (auto *UO = dyn_cast<UnaryOperator>(Ex)) {
+ if (UO->getOpcode() == UO_LNot)
+ return peelOffOuterExpr(UO->getSubExpr(), N);
-/// Walk through nodes until we get one that matches the statement exactly.
-/// Alternately, if we hit a known lvalue for the statement, we know we've
-/// gone too far (though we can likely track the lvalue better anyway).
-static const ExplodedNode* findNodeForStatement(const ExplodedNode *N,
- const Stmt *S,
- const Expr *Inner) {
- do {
- const ProgramPoint &pp = N->getLocation();
- if (auto ps = pp.getAs<StmtPoint>()) {
- if (ps->getStmt() == S || ps->getStmt() == Inner)
- break;
- } else if (auto CEE = pp.getAs<CallExitEnd>()) {
- if (CEE->getCalleeContext()->getCallSite() == S ||
- CEE->getCalleeContext()->getCallSite() == Inner)
- break;
- }
- N = N->getFirstPred();
- } while (N);
- return N;
+ // FIXME: There's a hack in our Store implementation that always computes
+ // field offsets around null pointers as if they are always equal to 0.
+ // The idea here is to report accesses to fields as null dereferences
+ // even though the pointer value that's being dereferenced is actually
+ // the offset of the field rather than exactly 0.
+ // See the FIXME in StoreManager's getLValueFieldOrIvar() method.
+ // This code interacts heavily with this hack; otherwise the value
+ // would not be null at all for most fields, so we'd be unable to track it.
+ if (UO->getOpcode() == UO_AddrOf && UO->getSubExpr()->isLValue())
+ if (const Expr *DerefEx = bugreporter::getDerefExpr(UO->getSubExpr()))
+ return peelOffOuterExpr(DerefEx, N);
+ }
+
+ return Ex;
}
/// Find the ExplodedNode where the lvalue (the value of 'Ex')
/// was computed.
static const ExplodedNode* findNodeForExpression(const ExplodedNode *N,
- const Expr *Inner) {
+ const Expr *Inner) {
while (N) {
- if (auto P = N->getLocation().getAs<PostStmt>()) {
- if (P->getStmt() == Inner)
- break;
- }
+ if (PathDiagnosticLocation::getStmt(N) == Inner)
+ return N;
N = N->getFirstPred();
}
- assert(N && "Unable to find the lvalue node.");
return N;
}
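The simplified findNodeForExpression is a plain predecessor-chain walk; a toy illustration with invented node and statement types:

#include <cassert>

struct Stmt {};

struct ExplodedNodeToy {
  const Stmt *S = nullptr;
  const ExplodedNodeToy *Pred = nullptr;
  const ExplodedNodeToy *getFirstPred() const { return Pred; }
};

// Walk backwards along the single-predecessor chain until we reach the node
// whose statement is the expression we want to track.
const ExplodedNodeToy *findNodeFor(const ExplodedNodeToy *N,
                                   const Stmt *Inner) {
  while (N) {
    if (N->S == Inner)
      return N;
    N = N->getFirstPred();
  }
  return nullptr; // the caller treats this as "cannot track the value"
}

int main() {
  Stmt A, B;
  ExplodedNodeToy N1{&A, nullptr}, N2{&B, &N1}, N3{nullptr, &N2};
  assert(findNodeFor(&N3, &A) == &N1);
  assert(findNodeFor(&N3, &B) == &N2);
}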
-/// Performing operator `&' on an lvalue expression is essentially a no-op.
-/// Then, if we are taking addresses of fields or elements, these are also
-/// unlikely to matter.
-static const Expr* peelOfOuterAddrOf(const Expr* Ex) {
- Ex = Ex->IgnoreParenCasts();
-
- // FIXME: There's a hack in our Store implementation that always computes
- // field offsets around null pointers as if they are always equal to 0.
- // The idea here is to report accesses to fields as null dereferences
- // even though the pointer value that's being dereferenced is actually
- // the offset of the field rather than exactly 0.
- // See the FIXME in StoreManager's getLValueFieldOrIvar() method.
- // This code interacts heavily with this hack; otherwise the value
- // would not be null at all for most fields, so we'd be unable to track it.
- if (const auto *Op = dyn_cast<UnaryOperator>(Ex))
- if (Op->getOpcode() == UO_AddrOf && Op->getSubExpr()->isLValue())
- if (const Expr *DerefEx = bugreporter::getDerefExpr(Op->getSubExpr()))
- return DerefEx;
- return Ex;
-}
-
-bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
- const Stmt *S,
- BugReport &report, bool IsArg,
- bool EnableNullFPSuppression) {
- if (!S || !N)
+bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
+ const Expr *E, BugReport &report,
+ bool EnableNullFPSuppression) {
+ if (!E || !InputNode)
return false;
- if (const auto *Ex = dyn_cast<Expr>(S))
- S = peelOffOuterExpr(Ex, N);
-
- const Expr *Inner = nullptr;
- if (const auto *Ex = dyn_cast<Expr>(S)) {
- Ex = peelOfOuterAddrOf(Ex);
- Ex = Ex->IgnoreParenCasts();
-
- if (Ex && (ExplodedGraph::isInterestingLValueExpr(Ex)
- || CallEvent::isCallStmt(Ex)))
- Inner = Ex;
- }
-
- if (IsArg && !Inner) {
- assert(N->getLocation().getAs<CallEnter>() && "Tracking arg but not at call");
- } else {
- N = findNodeForStatement(N, S, Inner);
- if (!N)
- return false;
- }
+ const Expr *Inner = peelOffOuterExpr(E, InputNode);
+ const ExplodedNode *LVNode = findNodeForExpression(InputNode, Inner);
+ if (!LVNode)
+ return false;
- ProgramStateRef state = N->getState();
+ ProgramStateRef LVState = LVNode->getState();
// The message send could be nil due to the receiver being nil.
// At this point in the path, the receiver should be live since we are at the
// message send expr. If it is nil, start tracking it.
- if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(S, N))
- trackNullOrUndefValue(N, Receiver, report, /* IsArg=*/ false,
- EnableNullFPSuppression);
+ if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(Inner, LVNode))
+ trackExpressionValue(LVNode, Receiver, report, EnableNullFPSuppression);
// See if the expression we're interested refers to a variable.
// If so, we can track both its contents and constraints on its value.
- if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
- const ExplodedNode *LVNode = findNodeForExpression(N, Inner);
- ProgramStateRef LVState = LVNode->getState();
+ if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
SVal LVal = LVNode->getSVal(Inner);
- const MemRegion *RR = getLocationRegionIfReference(Inner, N);
+ const MemRegion *RR = getLocationRegionIfReference(Inner, LVNode);
bool LVIsNull = LVState->isNull(LVal).isConstrainedTrue();
// If this is a C++ reference to a null pointer, we are tracking the
// pointer. In addition, we should find the store at which the reference
// got initialized.
- if (RR && !LVIsNull) {
+ if (RR && !LVIsNull)
if (auto KV = LVal.getAs<KnownSVal>())
report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
*KV, RR, EnableNullFPSuppression));
- }
// In case of C++ references, we want to differentiate between a null
// reference and reference to null pointer.
@@ -1602,9 +1618,8 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
llvm::make_unique<NoStoreFuncVisitor>(cast<SubRegion>(R)));
MacroNullReturnSuppressionVisitor::addMacroVisitorIfNecessary(
- N, R, EnableNullFPSuppression, report, V);
+ LVNode, R, EnableNullFPSuppression, report, V);
- report.markInteresting(R);
report.markInteresting(V);
report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(R));
@@ -1614,14 +1629,12 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
V.castAs<DefinedSVal>(), false));
// Add visitor, which will suppress inline defensive checks.
- if (auto DV = V.getAs<DefinedSVal>()) {
+ if (auto DV = V.getAs<DefinedSVal>())
if (!DV->isZeroConstant() && LVState->isNull(*DV).isConstrainedTrue() &&
- EnableNullFPSuppression) {
+ EnableNullFPSuppression)
report.addVisitor(
llvm::make_unique<SuppressInlineDefensiveChecksVisitor>(*DV,
- LVNode));
- }
- }
+ LVNode));
if (auto KV = V.getAs<KnownSVal>())
report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
@@ -1632,40 +1645,44 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
// If the expression is not an "lvalue expression", we can still
// track the constraints on its contents.
- SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
+ SVal V = LVState->getSValAsScalarOrLoc(Inner, LVNode->getLocationContext());
- // If the value came from an inlined function call, we should at least make
- // sure that function isn't pruned in our output.
- if (const auto *E = dyn_cast<Expr>(S))
- S = E->IgnoreParenCasts();
+ ReturnVisitor::addVisitorIfNecessary(
+ LVNode, Inner, report, EnableNullFPSuppression);
- ReturnVisitor::addVisitorIfNecessary(N, S, report, EnableNullFPSuppression);
-
- // Uncomment this to find cases where we aren't properly getting the
- // base value that was dereferenced.
- // assert(!V.isUnknownOrUndef());
// Is it a symbolic value?
if (auto L = V.getAs<loc::MemRegionVal>()) {
report.addVisitor(llvm::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
+ // FIXME: this is a hack for fixing a later crash when attempting to
+ // dereference a void* pointer.
+ // We should not try to dereference pointers at all when we don't care
+ // what is written inside the pointer.
+ bool CanDereference = true;
+ if (const auto *SR = dyn_cast<SymbolicRegion>(L->getRegion()))
+ if (SR->getSymbol()->getType()->getPointeeType()->isVoidType())
+ CanDereference = false;
+
// At this point we are dealing with the region's LValue.
// However, if the rvalue is a symbolic region, we should track it as well.
// Try to use the correct type when looking up the value.
SVal RVal;
- if (const auto *E = dyn_cast<Expr>(S))
- RVal = state->getRawSVal(L.getValue(), E->getType());
- else
- RVal = state->getSVal(L->getRegion());
+ if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
+ RVal = LVState->getRawSVal(L.getValue(), Inner->getType());
+ } else if (CanDereference) {
+ RVal = LVState->getSVal(L->getRegion());
+ }
- if (auto KV = RVal.getAs<KnownSVal>())
- report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
+ if (CanDereference)
+ if (auto KV = RVal.getAs<KnownSVal>())
+ report.addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
*KV, L->getRegion(), EnableNullFPSuppression));
const MemRegion *RegionRVal = RVal.getAsRegion();
if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
report.markInteresting(RegionRVal);
report.addVisitor(llvm::make_unique<TrackConstraintBRVisitor>(
- loc::MemRegionVal(RegionRVal), false));
+ loc::MemRegionVal(RegionRVal), /*assumption=*/false));
}
}
return true;
@@ -1687,7 +1704,6 @@ const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
std::shared_ptr<PathDiagnosticPiece>
NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC, BugReport &BR) {
Optional<PreStmt> P = N->getLocationAs<PreStmt>();
if (!P)
@@ -1714,8 +1730,8 @@ NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- bugreporter::trackNullOrUndefValue(N, Receiver, BR, /*IsArg*/ false,
- /*EnableNullFPSuppression*/ false);
+ bugreporter::trackExpressionValue(N, Receiver, BR,
+ /*EnableNullFPSuppression*/ false);
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
@@ -1768,9 +1784,9 @@ const char *ConditionBRVisitor::getTag() {
}
std::shared_ptr<PathDiagnosticPiece>
-ConditionBRVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *Prev,
+ConditionBRVisitor::VisitNode(const ExplodedNode *N,
BugReporterContext &BRC, BugReport &BR) {
- auto piece = VisitNodeImpl(N, Prev, BRC, BR);
+ auto piece = VisitNodeImpl(N, BRC, BR);
if (piece) {
piece->setTag(getTag());
if (auto *ev = dyn_cast<PathDiagnosticEventPiece>(piece.get()))
@@ -1781,11 +1797,10 @@ ConditionBRVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *Prev,
std::shared_ptr<PathDiagnosticPiece>
ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
- const ExplodedNode *Prev,
BugReporterContext &BRC, BugReport &BR) {
ProgramPoint progPoint = N->getLocation();
ProgramStateRef CurrentState = N->getState();
- ProgramStateRef PrevState = Prev->getState();
+ ProgramStateRef PrevState = N->getFirstPred()->getState();
// Compare the GDMs of the state, because that is where constraints
// are managed. Note that we ensure that we only look at nodes that
@@ -1936,8 +1951,8 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
// Use heuristics to determine if Ex is a macro expending to a literal and
// if so, use the macro's name.
- SourceLocation LocStart = Ex->getLocStart();
- SourceLocation LocEnd = Ex->getLocEnd();
+ SourceLocation LocStart = Ex->getBeginLoc();
+ SourceLocation LocEnd = Ex->getEndLoc();
if (LocStart.isMacroID() && LocEnd.isMacroID() &&
(isa<GNUNullExpr>(Ex) ||
isa<ObjCBoolLiteralExpr>(Ex) ||
@@ -1951,10 +1966,10 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
bool beginAndEndAreTheSameMacro = StartName.equals(EndName);
bool partOfParentMacro = false;
- if (ParentEx->getLocStart().isMacroID()) {
+ if (ParentEx->getBeginLoc().isMacroID()) {
StringRef PName = Lexer::getImmediateMacroNameForDiagnostics(
- ParentEx->getLocStart(), BRC.getSourceManager(),
- BRC.getASTContext().getLangOpts());
+ ParentEx->getBeginLoc(), BRC.getSourceManager(),
+ BRC.getASTContext().getLangOpts());
partOfParentMacro = PName.equals(StartName);
}
@@ -2205,7 +2220,7 @@ void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
// the user's fault, we currently don't report them very well, and
// Note that this will not help for any other data structure libraries, like
// TR1, Boost, or llvm/ADT.
- if (Options.shouldSuppressFromCXXStandardLibrary()) {
+ if (Options.ShouldSuppressFromCXXStandardLibrary) {
BR.markInvalid(getTag(), nullptr);
return;
} else {
@@ -2277,7 +2292,6 @@ void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
std::shared_ptr<PathDiagnosticPiece>
UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
BugReporterContext &BRC, BugReport &BR) {
ProgramStateRef State = N->getState();
ProgramPoint ProgLoc = N->getLocation();
@@ -2328,8 +2342,7 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
std::shared_ptr<PathDiagnosticPiece>
CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
- const ExplodedNode *Pred,
- BugReporterContext &BRC, BugReport &BR) {
+ BugReporterContext &BRC, BugReport &) {
if (Satisfied)
return nullptr;
@@ -2380,11 +2393,11 @@ CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
}
std::shared_ptr<PathDiagnosticPiece>
-TaintBugVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN,
- BugReporterContext &BRC, BugReport &BR) {
+TaintBugVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC, BugReport &) {
// Find the ExplodedNode where the taint was first introduced
- if (!N->getState()->isTainted(V) || PrevN->getState()->isTainted(V))
+ if (!N->getState()->isTainted(V) || N->getFirstPred()->getState()->isTainted(V))
return nullptr;
const Stmt *S = PathDiagnosticLocation::getStmt(N);
@@ -2406,36 +2419,43 @@ FalsePositiveRefutationBRVisitor::FalsePositiveRefutationBRVisitor()
void FalsePositiveRefutationBRVisitor::finalizeVisitor(
BugReporterContext &BRC, const ExplodedNode *EndPathNode, BugReport &BR) {
// Collect new constraints
- VisitNode(EndPathNode, nullptr, BRC, BR);
+ VisitNode(EndPathNode, BRC, BR);
// Create a refutation manager
- std::unique_ptr<SMTSolver> RefutationSolver = CreateZ3Solver();
+ SMTSolverRef RefutationSolver = CreateZ3Solver();
ASTContext &Ctx = BRC.getASTContext();
// Add constraints to the solver
for (const auto &I : Constraints) {
- SymbolRef Sym = I.first;
+ const SymbolRef Sym = I.first;
+ auto RangeIt = I.second.begin();
- SMTExprRef Constraints = RefutationSolver->fromBoolean(false);
- for (const auto &Range : I.second) {
+ SMTExprRef Constraints = SMTConv::getRangeExpr(
+ RefutationSolver, Ctx, Sym, RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true);
+ while ((++RangeIt) != I.second.end()) {
Constraints = RefutationSolver->mkOr(
- Constraints,
- RefutationSolver->getRangeExpr(Ctx, Sym, Range.From(), Range.To(),
- /*InRange=*/true));
+ Constraints, SMTConv::getRangeExpr(RefutationSolver, Ctx, Sym,
+ RangeIt->From(), RangeIt->To(),
+ /*InRange=*/true));
}
+
RefutationSolver->addConstraint(Constraints);
}
// And check for satisfiability
- if (RefutationSolver->check().isConstrainedFalse())
+ Optional<bool> isSat = RefutationSolver->check();
+ if (!isSat.hasValue())
+ return;
+
+ if (!isSat.getValue())
BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
}
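The constraints fed to the refutation solver have a fixed shape: a disjunction of ranges per symbol, conjoined across symbols. The patch encodes that shape through SMTConv::getRangeExpr; purely as an illustration of the shape (not of the solver), the same structure can be evaluated against one concrete assignment, with invented names:

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using Symbol = int;
using Range = std::pair<int64_t, int64_t>;               // inclusive bounds
using Constraints = std::map<Symbol, std::vector<Range>>;

bool satisfies(const Constraints &Cs,
               const std::map<Symbol, int64_t> &Assign) {
  for (const auto &Entry : Cs) {
    auto It = Assign.find(Entry.first);
    if (It == Assign.end())
      return false;
    bool InSomeRange = false;               // disjunction over the ranges
    for (const Range &R : Entry.second)
      InSomeRange |= (R.first <= It->second && It->second <= R.second);
    if (!InSomeRange)                       // conjunction across symbols
      return false;
  }
  return true;
}

int main() {
  Constraints Cs = {{1, {{0, 0}, {10, 20}}}, {2, {{5, 5}}}};
  assert(satisfies(Cs, {{1, 15}, {2, 5}}));
  assert(!satisfies(Cs, {{1, 3}, {2, 5}}));
}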
std::shared_ptr<PathDiagnosticPiece>
FalsePositiveRefutationBRVisitor::VisitNode(const ExplodedNode *N,
- const ExplodedNode *PrevN,
- BugReporterContext &BRC,
- BugReport &BR) {
+ BugReporterContext &,
+ BugReport &) {
// Collect new constraints
const ConstraintRangeTy &NewCs = N->getState()->get<ConstraintRange>();
ConstraintRangeTy::Factory &CF =
diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt
index de994b598e59..167f78af6289 100644
--- a/lib/StaticAnalyzer/Core/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -20,7 +20,6 @@ add_clang_library(clangStaticAnalyzerCore
CheckerContext.cpp
CheckerHelpers.cpp
CheckerManager.cpp
- CheckerRegistry.cpp
CommonBugCategories.cpp
ConstraintManager.cpp
CoreEngine.cpp
@@ -44,14 +43,16 @@ add_clang_library(clangStaticAnalyzerCore
RangeConstraintManager.cpp
RangedConstraintManager.cpp
RegionStore.cpp
- SValBuilder.cpp
- SVals.cpp
+ RetainSummaryManager.cpp
+ SarifDiagnostics.cpp
SimpleConstraintManager.cpp
SimpleSValBuilder.cpp
- SMTConstraintManager.cpp
Store.cpp
SubEngine.cpp
+ SValBuilder.cpp
+ SVals.cpp
SymbolManager.cpp
+ TaintManager.cpp
WorkList.cpp
Z3ConstraintManager.cpp
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index fe9260e32dd8..0e7f31502e81 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -169,18 +169,27 @@ bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
AnalysisDeclContext *CallEvent::getCalleeAnalysisDeclContext() const {
const Decl *D = getDecl();
-
- // If the callee is completely unknown, we cannot construct the stack frame.
if (!D)
return nullptr;
- // FIXME: Skip virtual functions for now. There's no easy procedure to foresee
- // the exact decl that should be used, especially when it's not a definition.
- if (const Decl *RD = getRuntimeDefinition().getDecl())
- if (RD != D)
- return nullptr;
+ // TODO: For now we skip functions without definitions, even if we have
+ // our own getDecl(), because it's hard to find out which re-declaration
+ // is going to be used, and usually clients don't care about this
+ // situation anyway: there's already a loss of precision because we
+ // cannot inline the call.
+ RuntimeDefinition RD = getRuntimeDefinition();
+ if (!RD.getDecl())
+ return nullptr;
+
+ AnalysisDeclContext *ADC =
+ LCtx->getAnalysisDeclContext()->getManager()->getContext(D);
+
+ // TODO: For now we skip virtual functions, because this also raises
+ // the problem of which decl to use, but now it's across different classes.
+ if (RD.mayHaveOtherDefinitions() || RD.getDecl() != ADC->getDecl())
+ return nullptr;
- return LCtx->getAnalysisDeclContext()->getManager()->getContext(D);
+ return ADC;
}
const StackFrameContext *CallEvent::getCalleeStackFrame() const {
@@ -218,7 +227,24 @@ const VarRegion *CallEvent::getParameterLocation(unsigned Index) const {
if (!SFC)
return nullptr;
- const ParmVarDecl *PVD = parameters()[Index];
+ // Retrieve parameters of the definition, which are different from
+ // CallEvent's parameters() because getDecl() isn't necessarily
+ // the definition. SFC contains the definition that would be used
+ // during analysis.
+ const Decl *D = SFC->getDecl();
+
+ // TODO: Refactor into a virtual method of CallEvent, like parameters().
+ const ParmVarDecl *PVD = nullptr;
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ PVD = FD->parameters()[Index];
+ else if (const auto *BD = dyn_cast<BlockDecl>(D))
+ PVD = BD->parameters()[Index];
+ else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ PVD = MD->parameters()[Index];
+ else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
+ PVD = CD->parameters()[Index];
+ assert(PVD && "Unexpected Decl kind!");
+
const VarRegion *VR =
State->getStateManager().getRegionManager().getVarRegion(PVD, SFC);
@@ -285,6 +311,20 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
// TODO: Factor this out + handle the lower level const pointers.
ValuesToInvalidate.push_back(getArgSVal(Idx));
+
+ // If a function accepts an object by argument (which would of course be a
+ // temporary that isn't lifetime-extended), invalidate the object itself,
+ // not only other objects reachable from it. This is necessary because the
+ // destructor has access to the temporary object after the call.
+ // TODO: Support placement arguments once we start
+ // constructing them directly.
+ // TODO: This is unnecessary when there's no destructor, but that's
+ // currently hard to figure out.
+ if (getKind() != CE_CXXAllocator)
+ if (isArgumentConstructedDirectly(Idx))
+ if (auto AdjIdx = getAdjustedParameterIndex(Idx))
+ if (const VarRegion *VR = getParameterLocation(*AdjIdx))
+ ValuesToInvalidate.push_back(loc::MemRegionVal(VR));
}
// Invalidate designated regions using the batch invalidation API.
@@ -319,11 +359,41 @@ bool CallEvent::isCalled(const CallDescription &CD) const {
return false;
if (!CD.IsLookupDone) {
CD.IsLookupDone = true;
- CD.II = &getState()->getStateManager().getContext().Idents.get(CD.FuncName);
+ CD.II = &getState()->getStateManager().getContext().Idents.get(
+ CD.getFunctionName());
}
const IdentifierInfo *II = getCalleeIdentifier();
if (!II || II != CD.II)
return false;
+
+ const Decl *D = getDecl();
+ // If CallDescription provides prefix names, use them to improve matching
+ // accuracy.
+ if (CD.QualifiedName.size() > 1 && D) {
+ const DeclContext *Ctx = D->getDeclContext();
+ // See if we'll be able to match them all.
+ size_t NumUnmatched = CD.QualifiedName.size() - 1;
+ for (; Ctx && isa<NamedDecl>(Ctx); Ctx = Ctx->getParent()) {
+ if (NumUnmatched == 0)
+ break;
+
+ if (const auto *ND = dyn_cast<NamespaceDecl>(Ctx)) {
+ if (ND->getName() == CD.QualifiedName[NumUnmatched - 1])
+ --NumUnmatched;
+ continue;
+ }
+
+ if (const auto *RD = dyn_cast<RecordDecl>(Ctx)) {
+ if (RD->getName() == CD.QualifiedName[NumUnmatched - 1])
+ --NumUnmatched;
+ continue;
+ }
+ }
+
+ if (NumUnmatched > 0)
+ return false;
+ }
+
return (CD.RequiredArgs == CallDescription::NoArgRequirement ||
CD.RequiredArgs == getNumArgs());
}
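The prefix-name matching added to isCalled walks the enclosing contexts from the innermost outwards, consuming qualifiers of the CallDescription as they are found and tolerating unrelated scopes in between. Roughly the same rule on plain strings, with an invented helper name:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// EnclosingScopes lists the declaration's enclosing namespaces/records from
// outermost to innermost; QualifiedName is e.g. {"std", "basic_string",
// "c_str"}, whose last element (the function name) is matched elsewhere.
bool matchesQualifiedSuffix(const std::vector<std::string> &EnclosingScopes,
                            const std::vector<std::string> &QualifiedName) {
  std::size_t NumUnmatched = QualifiedName.size() - 1;
  for (auto It = EnclosingScopes.rbegin();
       It != EnclosingScopes.rend() && NumUnmatched > 0; ++It)
    if (*It == QualifiedName[NumUnmatched - 1])
      --NumUnmatched;
  return NumUnmatched == 0;
}

int main() {
  assert(matchesQualifiedSuffix({"std", "basic_string"},
                                {"std", "basic_string", "c_str"}));
  // An unrelated my::basic_string::c_str does not match the std:: spelling.
  assert(!matchesQualifiedSuffix({"my", "basic_string"},
                                 {"std", "basic_string", "c_str"}));
}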
@@ -433,6 +503,14 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
const ParmVarDecl *ParamDecl = *I;
assert(ParamDecl && "Formal parameter has no decl?");
+ // TODO: Support allocator calls.
+ if (Call.getKind() != CE_CXXAllocator)
+ if (Call.isArgumentConstructedDirectly(Idx))
+ continue;
+
+ // TODO: Allocators should receive the correct size and possibly alignment,
+ // determined at compile time but not represented as arg-expressions,
+ // which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
if (!ArgVal.isUnknown()) {
Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
@@ -472,17 +550,18 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
return RuntimeDefinition(Decl);
}
- SubEngine *Engine = getState()->getStateManager().getOwningEngine();
- AnalyzerOptions &Opts = Engine->getAnalysisManager().options;
+ SubEngine &Engine = getState()->getStateManager().getOwningEngine();
+ AnalyzerOptions &Opts = Engine.getAnalysisManager().options;
// Try to get CTU definition only if CTUDir is provided.
- if (!Opts.naiveCTUEnabled())
+ if (!Opts.IsNaiveCTUEnabled)
return {};
cross_tu::CrossTranslationUnitContext &CTUCtx =
- *Engine->getCrossTranslationUnitContext();
+ *Engine.getCrossTranslationUnitContext();
llvm::Expected<const FunctionDecl *> CTUDeclOrError =
- CTUCtx.getCrossTUDefinition(FD, Opts.getCTUDir(), Opts.getCTUIndexName());
+ CTUCtx.getCrossTUDefinition(FD, Opts.CTUDir, Opts.CTUIndexName,
+ Opts.DisplayCTUProgress);
if (!CTUDeclOrError) {
handleAllErrors(CTUDeclOrError.takeError(),
@@ -758,7 +837,7 @@ const BlockDataRegion *BlockCall::getBlockRegion() const {
ArrayRef<ParmVarDecl*> BlockCall::parameters() const {
const BlockDecl *D = getDecl();
if (!D)
- return nullptr;
+ return None;
return D->parameters();
}
@@ -1008,7 +1087,7 @@ bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
Selector Sel) const {
assert(IDecl);
AnalysisManager &AMgr =
- getState()->getStateManager().getOwningEngine()->getAnalysisManager();
+ getState()->getStateManager().getOwningEngine().getAnalysisManager();
// If the class interface is declared inside the main file, assume it is not
// subclassed.
// TODO: It could actually be subclassed if the subclass is private as well.
@@ -1290,28 +1369,20 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
const Stmt *CallSite = CalleeCtx->getCallSite();
if (CallSite) {
- if (const CallExpr *CE = dyn_cast<CallExpr>(CallSite))
- return getSimpleCall(CE, State, CallerCtx);
-
- switch (CallSite->getStmtClass()) {
- case Stmt::CXXConstructExprClass:
- case Stmt::CXXTemporaryObjectExprClass: {
- SValBuilder &SVB = State->getStateManager().getSValBuilder();
- const auto *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
- Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
- SVal ThisVal = State->getSVal(ThisPtr);
-
- return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
- ThisVal.getAsRegion(), State, CallerCtx);
- }
- case Stmt::CXXNewExprClass:
- return getCXXAllocatorCall(cast<CXXNewExpr>(CallSite), State, CallerCtx);
- case Stmt::ObjCMessageExprClass:
- return getObjCMethodCall(cast<ObjCMessageExpr>(CallSite),
- State, CallerCtx);
- default:
- llvm_unreachable("This is not an inlineable statement.");
- }
+ if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx))
+ return Out;
+
+ // All other cases are handled by getCall.
+ assert(isa<CXXConstructExpr>(CallSite) &&
+ "This is not an inlineable statement");
+
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ const auto *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+ Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
+ SVal ThisVal = State->getSVal(ThisPtr);
+
+ return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
+ ThisVal.getAsRegion(), State, CallerCtx);
}
// Fall back to the CFG. The only thing we haven't handled yet is
@@ -1338,3 +1409,16 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
E.getAs<CFGBaseDtor>().hasValue(), State,
CallerCtx);
}
+
+CallEventRef<> CallEventManager::getCall(const Stmt *S, ProgramStateRef State,
+ const LocationContext *LC) {
+ if (const auto *CE = dyn_cast<CallExpr>(S)) {
+ return getSimpleCall(CE, State, LC);
+ } else if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
+ return getCXXAllocatorCall(NE, State, LC);
+ } else if (const auto *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ return getObjCMethodCall(ME, State, LC);
+ } else {
+ return nullptr;
+ }
+}
diff --git a/lib/StaticAnalyzer/Core/Checker.cpp b/lib/StaticAnalyzer/Core/Checker.cpp
index b422a8871983..72bfd84b40a3 100644
--- a/lib/StaticAnalyzer/Core/Checker.cpp
+++ b/lib/StaticAnalyzer/Core/Checker.cpp
@@ -17,6 +17,8 @@
using namespace clang;
using namespace ento;
+int ImplicitNullDerefEvent::Tag;
+
StringRef CheckerBase::getTagDescription() const {
return getCheckName().getName();
}
diff --git a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index b9facffcc8b5..e73a22ae3981 100644
--- a/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -21,10 +21,10 @@ namespace ento {
// Recursively find any substatements containing macros
bool containsMacro(const Stmt *S) {
- if (S->getLocStart().isMacroID())
+ if (S->getBeginLoc().isMacroID())
return true;
- if (S->getLocEnd().isMacroID())
+ if (S->getEndLoc().isMacroID())
return true;
for (const Stmt *Child : S->children())
@@ -103,9 +103,9 @@ Nullability getNullabilityAnnotation(QualType Type) {
const auto *AttrType = Type->getAs<AttributedType>();
if (!AttrType)
return Nullability::Unspecified;
- if (AttrType->getAttrKind() == AttributedType::attr_nullable)
+ if (AttrType->getAttrKind() == attr::TypeNullable)
return Nullability::Nullable;
- else if (AttrType->getAttrKind() == AttributedType::attr_nonnull)
+ else if (AttrType->getAttrKind() == attr::TypeNonNull)
return Nullability::Nonnull;
return Nullability::Unspecified;
}
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 712872a15d8a..688c47e984cc 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -441,14 +441,13 @@ void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
ExplodedNode *Pred,
ExprEngine &Eng,
const ReturnStmt *RS) {
- // We define the builder outside of the loop bacause if at least one checkers
- // creates a sucsessor for Pred, we do not need to generate an
+ // We define the builder outside of the loop because if at least one checker
+ // creates a successor for Pred, we do not need to generate an
// autotransition for it.
NodeBuilder Bldr(Pred, Dst, BC);
for (const auto checkFn : EndFunctionCheckers) {
- const ProgramPoint &L = BlockEntrance(BC.Block,
- Pred->getLocationContext(),
- checkFn.Checker);
+ const ProgramPoint &L =
+ FunctionExitPoint(RS, Pred->getLocationContext(), checkFn.Checker);
CheckerContext C(Bldr, Eng, Pred, L);
checkFn(RS, C);
}
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
deleted file mode 100644
index 645845ec2181..000000000000
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ /dev/null
@@ -1,190 +0,0 @@
-//===- CheckerRegistry.cpp - Maintains all available checkers -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/LLVM.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
-#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
-#include <cstddef>
-#include <tuple>
-
-using namespace clang;
-using namespace ento;
-
-static const char PackageSeparator = '.';
-
-using CheckerInfoSet = llvm::SetVector<const CheckerRegistry::CheckerInfo *>;
-
-static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
- const CheckerRegistry::CheckerInfo &b) {
- return a.FullName < b.FullName;
-}
-
-static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
- StringRef packageName) {
- // Does the checker's full name have the package as a prefix?
- if (!checker.FullName.startswith(packageName))
- return false;
-
- // Is the package actually just the name of a specific checker?
- if (checker.FullName.size() == packageName.size())
- return true;
-
- // Is the checker in the package (or a subpackage)?
- if (checker.FullName[packageName.size()] == PackageSeparator)
- return true;
-
- return false;
-}
-
-static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
- const llvm::StringMap<size_t> &packageSizes,
- CheckerOptInfo &opt, CheckerInfoSet &collected) {
- // Use a binary search to find the possible start of the package.
- CheckerRegistry::CheckerInfo packageInfo(nullptr, opt.getName(), "");
- auto end = checkers.cend();
- auto i = std::lower_bound(checkers.cbegin(), end, packageInfo, checkerNameLT);
-
- // If we didn't even find a possible package, give up.
- if (i == end)
- return;
-
- // If what we found doesn't actually start the package, give up.
- if (!isInPackage(*i, opt.getName()))
- return;
-
- // There is at least one checker in the package; claim the option.
- opt.claim();
-
- // See how large the package is.
- // If the package doesn't exist, assume the option refers to a single checker.
- size_t size = 1;
- llvm::StringMap<size_t>::const_iterator packageSize =
- packageSizes.find(opt.getName());
- if (packageSize != packageSizes.end())
- size = packageSize->getValue();
-
- // Step through all the checkers in the package.
- for (auto checkEnd = i+size; i != checkEnd; ++i)
- if (opt.isEnabled())
- collected.insert(&*i);
- else
- collected.remove(&*i);
-}
-
-void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
- StringRef desc) {
- Checkers.push_back(CheckerInfo(fn, name, desc));
-
- // Record the presence of the checker in its packages.
- StringRef packageName, leafName;
- std::tie(packageName, leafName) = name.rsplit(PackageSeparator);
- while (!leafName.empty()) {
- Packages[packageName] += 1;
- std::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
- }
-}
-
-void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
- SmallVectorImpl<CheckerOptInfo> &opts) const {
- // Sort checkers for efficient collection.
- llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
-
- // Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers;
- for (auto &i : opts)
- collectCheckers(Checkers, Packages, i, enabledCheckers);
-
- // Initialize the CheckerManager with all enabled checkers.
- for (const auto *i :enabledCheckers) {
- checkerMgr.setCurrentCheckName(CheckName(i->FullName));
- i->Initialize(checkerMgr);
- }
-}
-
-void CheckerRegistry::validateCheckerOptions(const AnalyzerOptions &opts,
- DiagnosticsEngine &diags) const {
- for (const auto &config : opts.Config) {
- size_t pos = config.getKey().find(':');
- if (pos == StringRef::npos)
- continue;
-
- bool hasChecker = false;
- StringRef checkerName = config.getKey().substr(0, pos);
- for (const auto &checker : Checkers) {
- if (checker.FullName.startswith(checkerName) &&
- (checker.FullName.size() == pos || checker.FullName[pos] == '.')) {
- hasChecker = true;
- break;
- }
- }
- if (!hasChecker)
- diags.Report(diag::err_unknown_analyzer_checker) << checkerName;
- }
-}
-
-void CheckerRegistry::printHelp(raw_ostream &out,
- size_t maxNameChars) const {
- // FIXME: Alphabetical sort puts 'experimental' in the middle.
- // Would it be better to name it '~experimental' or something else
- // that's ASCIIbetically last?
- llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
-
- // FIXME: Print available packages.
-
- out << "CHECKERS:\n";
-
- // Find the maximum option length.
- size_t optionFieldWidth = 0;
- for (const auto &i : Checkers) {
- // Limit the amount of padding we are willing to give up for alignment.
- // Package.Name Description [Hidden]
- size_t nameLength = i.FullName.size();
- if (nameLength <= maxNameChars)
- optionFieldWidth = std::max(optionFieldWidth, nameLength);
- }
-
- const size_t initialPad = 2;
- for (const auto &i : Checkers) {
- out.indent(initialPad) << i.FullName;
-
- int pad = optionFieldWidth - i.FullName.size();
-
- // Break on long option names.
- if (pad < 0) {
- out << '\n';
- pad = optionFieldWidth + initialPad;
- }
- out.indent(pad + 2) << i.Desc;
-
- out << '\n';
- }
-}
-
-void CheckerRegistry::printList(
- raw_ostream &out, SmallVectorImpl<CheckerOptInfo> &opts) const {
- llvm::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
-
- // Collect checkers enabled by the options.
- CheckerInfoSet enabledCheckers;
- for (auto &i : opts)
- collectCheckers(Checkers, Packages, i, enabledCheckers);
-
- for (const auto *i : enabledCheckers)
- out << i->FullName << '\n';
-}
diff --git a/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index 421dfa48c97b..cdae3ef0116a 100644
--- a/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -14,8 +14,8 @@ namespace clang { namespace ento { namespace categories {
const char * const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
const char * const LogicError = "Logic error";
-const char * const MemoryCoreFoundationObjectiveC =
- "Memory (Core Foundation/Objective-C)";
+const char * const MemoryRefCount =
+ "Memory (Core Foundation/Objective-C/OSObject)";
const char * const MemoryError = "Memory error";
const char * const UnixAPI = "Unix API";
}}}
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index c17b6aae37e2..196854cb09da 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -53,26 +53,28 @@ STATISTIC(NumPathsExplored,
// Core analysis engine.
//===----------------------------------------------------------------------===//
-static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
+static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts,
+ SubEngine &subengine) {
switch (Opts.getExplorationStrategy()) {
- case AnalyzerOptions::ExplorationStrategyKind::DFS:
+ case ExplorationStrategyKind::DFS:
return WorkList::makeDFS();
- case AnalyzerOptions::ExplorationStrategyKind::BFS:
+ case ExplorationStrategyKind::BFS:
return WorkList::makeBFS();
- case AnalyzerOptions::ExplorationStrategyKind::BFSBlockDFSContents:
+ case ExplorationStrategyKind::BFSBlockDFSContents:
return WorkList::makeBFSBlockDFSContents();
- case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirst:
+ case ExplorationStrategyKind::UnexploredFirst:
return WorkList::makeUnexploredFirst();
- case AnalyzerOptions::ExplorationStrategyKind::UnexploredFirstQueue:
+ case ExplorationStrategyKind::UnexploredFirstQueue:
return WorkList::makeUnexploredFirstPriorityQueue();
- default:
- llvm_unreachable("Unexpected case");
+ case ExplorationStrategyKind::UnexploredFirstLocationQueue:
+ return WorkList::makeUnexploredFirstPriorityLocationQueue();
}
+ llvm_unreachable("Unknown AnalyzerOptions::ExplorationStrategyKind");
}
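generateWorkList now covers every ExplorationStrategyKind explicitly and moves llvm_unreachable after the switch, so -Wswitch flags any enumerator added later. The same pattern in a self-contained form, with toy types and a plain assert standing in for llvm_unreachable:

#include <cassert>
#include <memory>

enum class Strategy { DFS, BFS };

struct WorkList { virtual ~WorkList() = default; };
struct DFSList final : WorkList {};
struct BFSList final : WorkList {};

std::unique_ptr<WorkList> makeWorkList(Strategy S) {
  switch (S) {
  case Strategy::DFS:
    return std::make_unique<DFSList>();
  case Strategy::BFS:
    return std::make_unique<BFSList>();
  }
  // No 'default:' label above: the compiler warns if a new enumerator is not
  // handled, and this point is reached only if the enum value is corrupted.
  assert(false && "unknown exploration strategy");
  return nullptr;
}

int main() {
  auto WL = makeWorkList(Strategy::BFS);
  assert(dynamic_cast<BFSList *>(WL.get()));
}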
CoreEngine::CoreEngine(SubEngine &subengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
- : SubEng(subengine), WList(generateWorkList(Opts)),
+ : SubEng(subengine), WList(generateWorkList(Opts, subengine)),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
@@ -146,7 +148,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
dispatchWorkItem(Node, Node->getLocation(), WU);
}
- SubEng.processEndWorklist(hasWorkRemaining());
+ SubEng.processEndWorklist();
return WList->hasWork();
}
@@ -223,8 +225,12 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
// Get the return statement.
const ReturnStmt *RS = nullptr;
if (!L.getSrc()->empty()) {
- if (Optional<CFGStmt> LastStmt = L.getSrc()->back().getAs<CFGStmt>()) {
+ CFGElement LastElement = L.getSrc()->back();
+ if (Optional<CFGStmt> LastStmt = LastElement.getAs<CFGStmt>()) {
RS = dyn_cast<ReturnStmt>(LastStmt->getStmt());
+ } else if (Optional<CFGAutomaticObjDtor> AutoDtor =
+ LastElement.getAs<CFGAutomaticObjDtor>()) {
+ RS = dyn_cast<ReturnStmt>(AutoDtor->getTriggerStmt());
}
}
@@ -392,8 +398,8 @@ void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
assert(B->succ_size() == 2);
NodeBuilderContext Ctx(*this, B, Pred);
ExplodedNodeSet Dst;
- SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
- *(B->succ_begin()), *(B->succ_begin()+1));
+ SubEng.processBranch(Cond, Ctx, Pred, Dst, *(B->succ_begin()),
+ *(B->succ_begin() + 1));
// Enqueue the new frontier onto the worklist.
enqueue(Dst);
}
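
The generateWorkList() hunk above replaces a default: label with an explicit case for every exploration strategy and moves the unreachable marker after the switch. The value of that pattern is that an exhaustive switch with no default lets Clang/GCC's -Wswitch flag any enumerator added later at compile time. A minimal self-contained sketch of the same pattern, using made-up Strategy/WorkList types rather than the analyzer's own:

#include <cassert>
#include <memory>

// Hypothetical stand-ins for ExplorationStrategyKind and WorkList.
enum class Strategy { DFS, BFS, UnexploredFirst };

struct WorkList {
  static std::unique_ptr<WorkList> makeDFS() { return std::make_unique<WorkList>(); }
  static std::unique_ptr<WorkList> makeBFS() { return std::make_unique<WorkList>(); }
  static std::unique_ptr<WorkList> makeUnexploredFirst() { return std::make_unique<WorkList>(); }
};

// Every enumerator gets its own case and there is no default:, so adding a
// new Strategy later triggers -Wswitch here at compile time. The assert after
// the switch (llvm_unreachable in the patch) only catches corrupted enum
// values at run time.
std::unique_ptr<WorkList> makeWorkList(Strategy S) {
  switch (S) {
  case Strategy::DFS:
    return WorkList::makeDFS();
  case Strategy::BFS:
    return WorkList::makeBFS();
  case Strategy::UnexploredFirst:
    return WorkList::makeUnexploredFirst();
  }
  assert(false && "unknown Strategy");
  return nullptr;
}

int main() { return makeWorkList(Strategy::BFS) ? 0 : 1; }
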
diff --git a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
index 530933916889..da7854df1def 100644
--- a/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
+++ b/lib/StaticAnalyzer/Core/DynamicTypeMap.cpp
@@ -77,5 +77,10 @@ void printDynamicTypeInfo(ProgramStateRef State, raw_ostream &Out,
}
}
+void *ProgramStateTrait<DynamicTypeMap>::GDMIndex() {
+ static int index = 0;
+ return &index;
+}
+
} // namespace ento
} // namespace clang
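
The GDMIndex() definition added above follows a common LLVM idiom: the address of a function-local static is unique per function (or per template instantiation), so it can serve as a lookup key with no central registration. A minimal sketch of the idiom, using an ordinary std::map and hypothetical trait tags instead of the analyzer's generic data map:

#include <cstdio>
#include <map>
#include <string>

// Each trait type gets a distinct key simply by taking the address of a
// local static inside its own template instantiation.
template <typename Trait> void *dataKey() {
  static int Index = 0; // one object per instantiation, address is unique
  return &Index;
}

struct DynamicTypeTag {}; // hypothetical trait tags
struct TaintTag {};

int main() {
  std::map<void *, std::string> GDM; // stand-in for the generic data map
  GDM[dataKey<DynamicTypeTag>()] = "dynamic type info";
  GDM[dataKey<TaintTag>()] = "taint info";
  std::printf("%zu distinct entries\n", GDM.size()); // prints: 2 distinct entries
}
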
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index eccaee292c40..b45f93b6dde8 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -44,6 +44,9 @@ static const Expr *ignoreTransparentExprs(const Expr *E) {
case Stmt::ExprWithCleanupsClass:
E = cast<ExprWithCleanups>(E)->getSubExpr();
break;
+ case Stmt::ConstantExprClass:
+ E = cast<ConstantExpr>(E)->getSubExpr();
+ break;
case Stmt::CXXBindTemporaryExprClass:
E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
break;
@@ -89,6 +92,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::ExprWithCleanupsClass:
case Stmt::GenericSelectionExprClass:
case Stmt::OpaqueValueExprClass:
+ case Stmt::ConstantExprClass:
case Stmt::ParenExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
llvm_unreachable("Should have been handled by ignoreTransparentExprs");
@@ -189,11 +193,6 @@ EnvironmentManager::removeDeadBindings(Environment Env,
// Mark all symbols in the block expr's value live.
RSScaner.scan(X);
- continue;
- } else {
- SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI)
- SymReaper.maybeDead(*SI);
}
}
@@ -202,7 +201,9 @@ EnvironmentManager::removeDeadBindings(Environment Env,
}
void Environment::print(raw_ostream &Out, const char *NL,
- const char *Sep, const LocationContext *WithLC) const {
+ const char *Sep,
+ const ASTContext &Context,
+ const LocationContext *WithLC) const {
if (ExprBindings.isEmpty())
return;
@@ -222,10 +223,9 @@ void Environment::print(raw_ostream &Out, const char *NL,
assert(WithLC);
- LangOptions LO; // FIXME.
- PrintingPolicy PP(LO);
+ PrintingPolicy PP = Context.getPrintingPolicy();
- Out << NL << NL << "Expressions by stack frame:" << NL;
+ Out << NL << "Expressions by stack frame:" << NL;
WithLC->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
for (auto I : ExprBindings) {
if (I.first.getLocationContext() != LC)
@@ -234,8 +234,8 @@ void Environment::print(raw_ostream &Out, const char *NL,
const Stmt *S = I.first.getStmt();
assert(S != nullptr && "Expected non-null Stmt");
- Out << "(" << (const void *)LC << ',' << (const void *)S << ") ";
- S->printPretty(Out, nullptr, PP);
+ Out << "(LC" << LC->getID() << ", S" << S->getID(Context) << ") ";
+ S->printPretty(Out, /*Helper=*/nullptr, PP);
Out << " : " << I.second << NL;
}
});
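
The printing changes above replace raw pointer values with per-object IDs ("LC…", "S…"), which keeps state dumps stable across runs; raw addresses change on every execution, so they are useless for diffing two dumps. A tiny illustration of the difference using a hypothetical counter-based ID (the patch derives its IDs from the owning allocator and ASTContext instead):

#include <cstdio>

struct Stmt {
  unsigned ID; // hypothetical stable ID, handed out in creation order
  explicit Stmt(unsigned &Counter) : ID(Counter++) {}
};

int main() {
  unsigned Counter = 0;
  Stmt A(Counter), B(Counter);
  std::printf("(%p, %p)\n", (void *)&A, (void *)&B); // differs between runs
  std::printf("(S%u, S%u)\n", A.ID, B.ID);           // "(S0, S1)" every run
}
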
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index ece103d9d09a..d6bcbb96b55f 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -36,23 +36,6 @@ using namespace clang;
using namespace ento;
//===----------------------------------------------------------------------===//
-// Node auditing.
-//===----------------------------------------------------------------------===//
-
-// An out of line virtual method to provide a home for the class vtable.
-ExplodedNode::Auditor::~Auditor() = default;
-
-#ifndef NDEBUG
-static ExplodedNode::Auditor* NodeAuditor = nullptr;
-#endif
-
-void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
-#ifndef NDEBUG
- NodeAuditor = A;
-#endif
-}
-
-//===----------------------------------------------------------------------===//
// Cleanup.
//===----------------------------------------------------------------------===//
@@ -224,9 +207,6 @@ void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
assert(!V->isSink());
Preds.addNode(V, G);
V->Succs.addNode(this, G);
-#ifndef NDEBUG
- if (NodeAuditor) NodeAuditor->AddEdge(V, this);
-#endif
}
void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
@@ -303,6 +283,16 @@ ExplodedNode * const *ExplodedNode::NodeGroup::end() const {
return Storage.getAddrOfPtr1() + 1;
}
+int64_t ExplodedNode::getID(ExplodedGraph *G) const {
+ return G->getAllocator().identifyKnownAlignedObject<ExplodedNode>(this);
+}
+
+bool ExplodedNode::isTrivial() const {
+ return pred_size() == 1 && succ_size() == 1 &&
+ getFirstPred()->getState()->getID() == getState()->getID() &&
+ getFirstPred()->succ_size() == 1;
+}
+
ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
ProgramStateRef State,
bool IsSink,
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 2b4bdd754fdb..151eef56fece 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -98,11 +98,12 @@ STATISTIC(NumMaxBlockCountReachedInInlined,
STATISTIC(NumTimesRetriedWithoutInlining,
"The # of times we re-evaluated a call without inlining");
-
//===----------------------------------------------------------------------===//
// Internal program state traits.
//===----------------------------------------------------------------------===//
+namespace {
+
// When modeling a C++ constructor, for a variety of reasons we need to track
// the location of the object for the duration of its ConstructionContext.
// ObjectsUnderConstruction maps statements within the construction context
@@ -137,9 +138,17 @@ public:
const ConstructionContextItem &getItem() const { return Impl.first; }
const LocationContext *getLocationContext() const { return Impl.second; }
+ ASTContext &getASTContext() const {
+ return getLocationContext()->getDecl()->getASTContext();
+ }
+
void print(llvm::raw_ostream &OS, PrinterHelper *Helper, PrintingPolicy &PP) {
- OS << '(' << getLocationContext() << ',' << getAnyASTNodePtr() << ','
- << getItem().getKindAsString();
+ OS << "(LC" << getLocationContext()->getID() << ',';
+ if (const Stmt *S = getItem().getStmtOrNull())
+ OS << 'S' << S->getID(getASTContext());
+ else
+ OS << 'I' << getItem().getCXXCtorInitializer()->getID(getASTContext());
+ OS << ',' << getItem().getKindAsString();
if (getItem().getKind() == ConstructionContextItem::ArgumentKind)
OS << " #" << getItem().getIndex();
OS << ") ";
@@ -164,6 +173,7 @@ public:
return Impl < RHS.Impl;
}
};
+} // namespace
typedef llvm::ImmutableMap<ConstructedObjectKey, SVal>
ObjectsUnderConstructionMap;
@@ -177,7 +187,7 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(ObjectsUnderConstruction,
static const char* TagProviderName = "ExprEngine";
ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
- AnalysisManager &mgr, bool gcEnabled,
+ AnalysisManager &mgr,
SetOfConstDecls *VisitedCalleesIn,
FunctionSummariesTy *FS,
InliningModes HowToInlineIn)
@@ -189,11 +199,11 @@ ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
this),
SymMgr(StateMgr.getSymbolManager()),
svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
- ObjCGCEnabled(gcEnabled), BR(mgr, *this),
+ BR(mgr, *this),
VisitedCallees(VisitedCalleesIn), HowToInline(HowToInlineIn) {
- unsigned TrimInterval = mgr.options.getGraphTrimInterval();
+ unsigned TrimInterval = mgr.options.GraphTrimInterval;
if (TrimInterval != 0) {
- // Enable eager node reclaimation when constructing the ExplodedGraph.
+ // Enable eager node reclamation when constructing the ExplodedGraph.
G.enableNodeReclamation(TrimInterval);
}
}
@@ -283,11 +293,10 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
return state;
}
-ProgramStateRef
-ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
- const LocationContext *LC,
- const Expr *InitWithAdjustments,
- const Expr *Result) {
+ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
+ ProgramStateRef State, const LocationContext *LC,
+ const Expr *InitWithAdjustments, const Expr *Result,
+ const SubRegion **OutRegionWithAdjustments) {
// FIXME: This function is a hack that works around the quirky AST
// we're often having with respect to C++ temporaries. If only we modelled
// the actual execution order of statements properly in the CFG,
@@ -297,8 +306,11 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
if (!Result) {
// If we don't have an explicit result expression, we're in "if needed"
// mode. Only create a region if the current value is a NonLoc.
- if (!InitValWithAdjustments.getAs<NonLoc>())
+ if (!InitValWithAdjustments.getAs<NonLoc>()) {
+ if (OutRegionWithAdjustments)
+ *OutRegionWithAdjustments = nullptr;
return State;
+ }
Result = InitWithAdjustments;
} else {
// We need to create a region no matter what. For sanity, make sure we don't
@@ -418,11 +430,17 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
// The result expression would now point to the correct sub-region of the
// newly created temporary region. Do this last in order to getSVal of Init
// correctly in case (Result == Init).
- State = State->BindExpr(Result, LC, Reg);
+ if (Result->isGLValue()) {
+ State = State->BindExpr(Result, LC, Reg);
+ } else {
+ State = State->BindExpr(Result, LC, InitValWithAdjustments);
+ }
// Notify checkers once for two bindLoc()s.
State = processRegionChange(State, TR, LC);
+ if (OutRegionWithAdjustments)
+ *OutRegionWithAdjustments = cast<SubRegion>(Reg.getAsRegion());
return State;
}
@@ -523,7 +541,6 @@ ExprEngine::processRegionChanges(ProgramStateRef state,
static void printObjectsUnderConstructionForContext(raw_ostream &Out,
ProgramStateRef State,
const char *NL,
- const char *Sep,
const LocationContext *LC) {
PrintingPolicy PP =
LC->getAnalysisDeclContext()->getASTContext().getPrintingPolicy();
@@ -545,7 +562,7 @@ void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
Out << Sep << "Objects under construction:" << NL;
LCtx->dumpStack(Out, "", NL, Sep, [&](const LocationContext *LC) {
- printObjectsUnderConstructionForContext(Out, State, NL, Sep, LC);
+ printObjectsUnderConstructionForContext(Out, State, NL, LC);
});
}
}
@@ -553,7 +570,7 @@ void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
}
-void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
+void ExprEngine::processEndWorklist() {
getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
}
@@ -666,44 +683,35 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
// Process any special transfer function for dead symbols.
// A tag to track convenience transitions, which can be removed at cleanup.
static SimpleProgramPointTag cleanupTag(TagProviderName, "Clean Node");
- if (!SymReaper.hasDeadSymbols()) {
- // Generate a CleanedNode that has the environment and store cleaned
- // up. Since no symbols are dead, we can optimize and not clean out
- // the constraint manager.
- StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx);
- Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K);
-
- } else {
- // Call checkers with the non-cleaned state so that they could query the
- // values of the soon to be dead symbols.
- ExplodedNodeSet CheckedSet;
- getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
- DiagnosticStmt, *this, K);
-
- // For each node in CheckedSet, generate CleanedNodes that have the
- // environment, the store, and the constraints cleaned up but have the
- // user-supplied states as the predecessors.
- StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
- for (const auto I : CheckedSet) {
- ProgramStateRef CheckerState = I->getState();
-
- // The constraint manager has not been cleaned up yet, so clean up now.
- CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
- SymReaper);
-
- assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
- "Checkers are not allowed to modify the Environment as a part of "
- "checkDeadSymbols processing.");
- assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
- "Checkers are not allowed to modify the Store as a part of "
- "checkDeadSymbols processing.");
-
- // Create a state based on CleanedState with CheckerState GDM and
- // generate a transition to that state.
- ProgramStateRef CleanedCheckerSt =
+ // Call checkers with the non-cleaned state so that they could query the
+ // values of the soon to be dead symbols.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
+ DiagnosticStmt, *this, K);
+
+ // For each node in CheckedSet, generate CleanedNodes that have the
+ // environment, the store, and the constraints cleaned up but have the
+ // user-supplied states as the predecessors.
+ StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
+ for (const auto I : CheckedSet) {
+ ProgramStateRef CheckerState = I->getState();
+
+ // The constraint manager has not been cleaned up yet, so clean up now.
+ CheckerState =
+ getConstraintManager().removeDeadBindings(CheckerState, SymReaper);
+
+ assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Environment as a part of "
+ "checkDeadSymbols processing.");
+ assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
+ "Checkers are not allowed to modify the Store as a part of "
+ "checkDeadSymbols processing.");
+
+ // Create a state based on CleanedState with CheckerState GDM and
+ // generate a transition to that state.
+ ProgramStateRef CleanedCheckerSt =
StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
- Bldr.generateNode(DiagnosticStmt, I, CleanedCheckerSt, &cleanupTag, K);
- }
+ Bldr.generateNode(DiagnosticStmt, I, CleanedCheckerSt, &cleanupTag, K);
}
}
@@ -712,7 +720,7 @@ void ExprEngine::ProcessStmt(const Stmt *currStmt, ExplodedNode *Pred) {
G.reclaimRecentlyAllocatedNodes();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- currStmt->getLocStart(),
+ currStmt->getBeginLoc(),
"Error evaluating statement");
// Remove dead bindings and symbols.
@@ -739,14 +747,14 @@ void ExprEngine::ProcessStmt(const Stmt *currStmt, ExplodedNode *Pred) {
void ExprEngine::ProcessLoopExit(const Stmt* S, ExplodedNode *Pred) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- S->getLocStart(),
+ S->getBeginLoc(),
"Error evaluating end of the loop");
ExplodedNodeSet Dst;
Dst.Add(Pred);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
ProgramStateRef NewState = Pred->getState();
- if(AMgr.options.shouldUnrollLoops())
+ if(AMgr.options.ShouldUnrollLoops)
NewState = processLoopEnd(S, NewState);
LoopExit PP(S, Pred->getLocationContext());
@@ -878,12 +886,12 @@ void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
// TODO: We're not evaluating allocators for all cases just yet as
// we're not handling the return value correctly, which causes false
// positives when the alpha.cplusplus.NewDeleteLeaks check is on.
- if (Opts.mayInlineCXXAllocator())
+ if (Opts.MayInlineCXXAllocator)
VisitCXXNewAllocatorCall(NE, Pred, Dst);
else {
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
const LocationContext *LCtx = Pred->getLocationContext();
- PostImplicitCall PP(NE->getOperatorNew(), NE->getLocStart(), LCtx);
+ PostImplicitCall PP(NE->getOperatorNew(), NE->getBeginLoc(), LCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
}
Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
@@ -940,7 +948,7 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
const CXXRecordDecl *RD = BTy->getAsCXXRecordDecl();
const CXXDestructorDecl *Dtor = RD->getDestructor();
- PostImplicitCall PP(Dtor, DE->getLocStart(), LCtx);
+ PostImplicitCall PP(Dtor, DE->getBeginLoc(), LCtx);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
@@ -1025,13 +1033,13 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
MR = V->getAsRegion();
}
- // If copy elision has occured, and the constructor corresponding to the
+ // If copy elision has occurred, and the constructor corresponding to the
// destructor was elided, we need to skip the destructor as well.
if (isDestructorElided(State, BTE, LC)) {
State = cleanupElidedDestructor(State, BTE, LC);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
PostImplicitCall PP(D.getDestructorDecl(getContext()),
- D.getBindTemporaryExpr()->getLocStart(),
+ D.getBindTemporaryExpr()->getBeginLoc(),
Pred->getLocationContext());
Bldr.generateNode(PP, State, Pred);
return;
@@ -1093,7 +1101,7 @@ void ExprEngine::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE,
// This is a fallback solution in case we didn't have a construction
// context when we were constructing the temporary. Otherwise the map should
// have been populated there.
- if (!getAnalysisManager().options.includeTemporaryDtorsInCFG()) {
+ if (!getAnalysisManager().options.ShouldIncludeTemporaryDtorsInCFG) {
// In case we don't have temporary destructors in the CFG, do not mark
// the initialization - we would otherwise never clean it up.
Dst = PreVisit;
@@ -1120,7 +1128,7 @@ ProgramStateRef ExprEngine::escapeValue(ProgramStateRef State, SVal V,
InvalidatedSymbols Symbols;
public:
- explicit CollectReachableSymbolsCallback(ProgramStateRef State) {}
+ explicit CollectReachableSymbolsCallback(ProgramStateRef) {}
const InvalidatedSymbols &getSymbols() const { return Symbols; }
@@ -1139,8 +1147,7 @@ ProgramStateRef ExprEngine::escapeValue(ProgramStateRef State, SVal V,
void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
ExplodedNodeSet &DstTop) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- S->getLocStart(),
- "Error evaluating statement");
+ S->getBeginLoc(), "Error evaluating statement");
ExplodedNodeSet Dst;
StmtNodeBuilder Bldr(Pred, DstTop, *currBldrCtx);
@@ -1274,6 +1281,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.addNodes(Dst);
break;
+ case Expr::ConstantExprClass:
case Stmt::ExprWithCleanupsClass:
// Handled due to fully linearised CFG.
break;
@@ -1454,7 +1462,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
case Stmt::LambdaExprClass:
- if (AMgr.options.shouldInlineLambdas()) {
+ if (AMgr.options.ShouldInlineLambdas) {
Bldr.takeNodes(Pred);
VisitLambdaExpr(cast<LambdaExpr>(S), Pred, Dst);
Bldr.addNodes(Dst);
@@ -1483,7 +1491,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
Bldr.takeNodes(Pred);
- if (AMgr.options.eagerlyAssumeBinOpBifurcation &&
+ if (AMgr.options.ShouldEagerlyAssume &&
(B->isRelationalOp() || B->isEqualityOp())) {
ExplodedNodeSet Tmp;
VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
@@ -1747,7 +1755,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::UnaryOperatorClass: {
Bldr.takeNodes(Pred);
const auto *U = cast<UnaryOperator>(S);
- if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) {
+ if (AMgr.options.ShouldEagerlyAssume && (U->getOpcode() == UO_LNot)) {
ExplodedNodeSet Tmp;
VisitUnaryOperator(U, Pred, Tmp);
evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U);
@@ -1848,7 +1856,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
PrettyStackTraceLocationContext CrashInfo(Pred->getLocationContext());
// If we reach a loop which has a known bound (and meets
// other constraints) then consider completely unrolling it.
- if(AMgr.options.shouldUnrollLoops()) {
+ if(AMgr.options.ShouldUnrollLoops) {
unsigned maxBlockVisitOnPath = AMgr.options.maxBlockVisitOnPath;
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
if (Term) {
@@ -1870,7 +1878,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
// maximum number of times, widen the loop.
unsigned int BlockCount = nodeBuilder.getContext().blockCount();
if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
- AMgr.options.shouldWidenLoops()) {
+ AMgr.options.ShouldWidenLoops) {
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminator();
if (!(Term &&
(isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
@@ -1923,8 +1931,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
/// integers that promote their values (which are currently not tracked well).
/// This function returns the SVal bound to Condition->IgnoreCasts if all the
// cast(s) did was sign-extend the original value.
-static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
- ProgramStateRef state,
+static SVal RecoverCastedSymbol(ProgramStateRef state,
const Stmt *Condition,
const LocationContext *LCtx,
ASTContext &Ctx) {
@@ -2021,7 +2028,7 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
llvm_unreachable("could not resolve condition");
}
-void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
+void ExprEngine::processBranch(const Stmt *Condition,
NodeBuilderContext& BldCtx,
ExplodedNode *Pred,
ExplodedNodeSet &Dst,
@@ -2046,7 +2053,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
Condition = ResolveCondition(Condition, BldCtx.getBlock());
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- Condition->getLocStart(),
+ Condition->getBeginLoc(),
"Error evaluating branch");
ExplodedNodeSet CheckersOutSet;
@@ -2072,8 +2079,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
// integers that promote their values are currently not tracked well.
// If 'Condition' is such an expression, try and recover the
// underlying value and use that instead.
- SVal recovered = RecoverCastedSymbol(getStateManager(),
- PrevState, Condition,
+ SVal recovered = RecoverCastedSymbol(PrevState, Condition,
PredI->getLocationContext(),
getContext());
@@ -2200,17 +2206,21 @@ void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
ExplodedNode *Pred,
const ReturnStmt *RS) {
+ ProgramStateRef State = Pred->getState();
+
+ if (!Pred->getStackFrame()->inTopFrame())
+ State = finishArgumentConstruction(
+ State, *getStateManager().getCallEventManager().getCaller(
+ Pred->getStackFrame(), Pred->getState()));
+
// FIXME: We currently cannot assert that temporaries are clear, because
// lifetime extended temporaries are not always modelled correctly. In some
// cases when we materialize the temporary, we do
// createTemporaryRegionIfNeeded(), and the region changes, and also the
// respective destructor becomes automatic from temporary. So for now clean up
- // the state manually before asserting. Ideally, the code above the assertion
- // should go away, but the assertion should remain.
+ // the state manually before asserting. Ideally, this braced block of code
+ // should go away.
{
- ExplodedNodeSet CleanUpObjects;
- NodeBuilder Bldr(Pred, CleanUpObjects, BC);
- ProgramStateRef State = Pred->getState();
const LocationContext *FromLC = Pred->getLocationContext();
const LocationContext *ToLC = FromLC->getStackFrame()->getParent();
const LocationContext *LC = FromLC;
@@ -2229,15 +2239,20 @@ void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
}
LC = LC->getParent();
}
- if (State != Pred->getState()) {
- Pred = Bldr.generateNode(Pred->getLocation(), State, Pred);
- if (!Pred) {
- // The node with clean temporaries already exists. We might have reached
- // it on a path on which we initialize different temporaries.
- return;
- }
+ }
+
+ // Perform the transition with cleanups.
+ if (State != Pred->getState()) {
+ ExplodedNodeSet PostCleanup;
+ NodeBuilder Bldr(Pred, PostCleanup, BC);
+ Pred = Bldr.generateNode(Pred->getLocation(), State, Pred);
+ if (!Pred) {
+ // The node with clean temporaries already exists. We might have reached
+ // it on a path on which we initialize different temporaries.
+ return;
}
}
+
assert(areAllObjectsFullyConstructed(Pred->getState(),
Pred->getLocationContext(),
Pred->getStackFrame()->getParent()));
@@ -2364,7 +2379,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
Optional<std::pair<SVal, QualType>> VInfo;
- if (AMgr.options.shouldInlineLambdas() && DeclRefEx &&
+ if (AMgr.options.ShouldInlineLambdas && DeclRefEx &&
DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
MD->getParent()->isLambda()) {
// Lookup the field of the lambda.
@@ -2524,8 +2539,12 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
}
// Handle regular struct fields / member variables.
- state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
- SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
+ const SubRegion *MR = nullptr;
+ state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr,
+ /*Result=*/nullptr,
+ /*OutRegionWithAdjustments=*/&MR);
+ SVal baseExprVal =
+ MR ? loc::MemRegionVal(MR) : state->getSVal(BaseExpr, LCtx);
const auto *field = cast<FieldDecl>(Member);
SVal L = state->getLValue(field, baseExprVal);
@@ -2645,7 +2664,6 @@ ProgramStateRef
ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
- ArrayRef<const MemRegion *> Regions,
const CallEvent *Call,
RegionAndSymbolInvalidationTraits &ITraits) {
if (!Invalidated || Invalidated->empty())
@@ -2755,7 +2773,7 @@ void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
+ evalLocation(Tmp, AssignE, LocationE, Pred, state, location, false);
if (Tmp.empty())
return;
@@ -2780,7 +2798,7 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
assert(BoundEx);
// Evaluate the location (checks for bad dereferences).
ExplodedNodeSet Tmp;
- evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, tag, true);
+ evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, true);
if (Tmp.empty())
return;
@@ -2811,7 +2829,6 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
ExplodedNode *Pred,
ProgramStateRef state,
SVal location,
- const ProgramPointTag *tag,
bool isLoad) {
StmtNodeBuilder BldrTop(Pred, Dst, *currBldrCtx);
// Early checks for performance reason.
@@ -2927,211 +2944,108 @@ void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
-static ExprEngine* GraphPrintCheckerState;
-static SourceManager* GraphPrintSourceManager;
-
namespace llvm {
template<>
-struct DOTGraphTraits<ExplodedNode*> : public DefaultDOTGraphTraits {
+struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
- // FIXME: Since we do not cache error nodes in ExprEngine now, this does not
- // work.
- static std::string getNodeAttributes(const ExplodedNode *N, void*) {
- return {};
- }
-
- // De-duplicate some source location pretty-printing.
- static void printLocation(raw_ostream &Out, SourceLocation SLoc) {
- if (SLoc.isFileID()) {
- Out << "\\lline="
- << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
- << " col="
- << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
- << "\\l";
- }
- }
-
- static std::string getNodeLabel(const ExplodedNode *N, void*){
- std::string sbuf;
- llvm::raw_string_ostream Out(sbuf);
-
- // Program Location.
- ProgramPoint Loc = N->getLocation();
-
- switch (Loc.getKind()) {
- case ProgramPoint::BlockEntranceKind:
- Out << "Block Entrance: B"
- << Loc.castAs<BlockEntrance>().getBlock()->getBlockID();
- break;
-
- case ProgramPoint::BlockExitKind:
- assert(false);
- break;
-
- case ProgramPoint::CallEnterKind:
- Out << "CallEnter";
- break;
-
- case ProgramPoint::CallExitBeginKind:
- Out << "CallExitBegin";
- break;
+ static bool nodeHasBugReport(const ExplodedNode *N) {
+ BugReporter &BR = static_cast<ExprEngine &>(
+ N->getState()->getStateManager().getOwningEngine()).getBugReporter();
- case ProgramPoint::CallExitEndKind:
- Out << "CallExitEnd";
- break;
-
- case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
- Out << "PostStmtPurgeDeadSymbols";
- break;
-
- case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
- Out << "PreStmtPurgeDeadSymbols";
- break;
-
- case ProgramPoint::EpsilonKind:
- Out << "Epsilon Point";
- break;
+ const auto EQClasses =
+ llvm::make_range(BR.EQClasses_begin(), BR.EQClasses_end());
- case ProgramPoint::LoopExitKind: {
- LoopExit LE = Loc.castAs<LoopExit>();
- Out << "LoopExit: " << LE.getLoopStmt()->getStmtClassName();
- break;
+ for (const auto &EQ : EQClasses) {
+ for (const BugReport &Report : EQ) {
+ if (Report.getErrorNode() == N)
+ return true;
}
+ }
+ return false;
+ }
- case ProgramPoint::PreImplicitCallKind: {
- ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
- Out << "PreCall: ";
-
- // FIXME: Get proper printing options.
- PC.getDecl()->print(Out, LangOptions());
- printLocation(Out, PC.getLocation());
- break;
- }
-
- case ProgramPoint::PostImplicitCallKind: {
- ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
- Out << "PostCall: ";
-
- // FIXME: Get proper printing options.
- PC.getDecl()->print(Out, LangOptions());
- printLocation(Out, PC.getLocation());
- break;
- }
-
- case ProgramPoint::PostInitializerKind: {
- Out << "PostInitializer: ";
- const CXXCtorInitializer *Init =
- Loc.castAs<PostInitializer>().getInitializer();
- if (const FieldDecl *FD = Init->getAnyMember())
- Out << *FD;
- else {
- QualType Ty = Init->getTypeSourceInfo()->getType();
- Ty = Ty.getLocalUnqualifiedType();
- LangOptions LO; // FIXME.
- Ty.print(Out, LO);
- }
- break;
- }
-
- case ProgramPoint::BlockEdgeKind: {
- const BlockEdge &E = Loc.castAs<BlockEdge>();
- Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
- << E.getDst()->getBlockID() << ')';
-
- if (const Stmt *T = E.getSrc()->getTerminator()) {
- SourceLocation SLoc = T->getLocStart();
-
- Out << "\\|Terminator: ";
- LangOptions LO; // FIXME.
- E.getSrc()->printTerminator(Out, LO);
-
- if (SLoc.isFileID()) {
- Out << "\\lline="
- << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
- << " col="
- << GraphPrintSourceManager->getExpansionColumnNumber(SLoc);
- }
-
- if (isa<SwitchStmt>(T)) {
- const Stmt *Label = E.getDst()->getLabel();
-
- if (Label) {
- if (const auto *C = dyn_cast<CaseStmt>(Label)) {
- Out << "\\lcase ";
- LangOptions LO; // FIXME.
- if (C->getLHS())
- C->getLHS()->printPretty(Out, nullptr, PrintingPolicy(LO));
-
- if (const Stmt *RHS = C->getRHS()) {
- Out << " .. ";
- RHS->printPretty(Out, nullptr, PrintingPolicy(LO));
- }
-
- Out << ":";
- }
- else {
- assert(isa<DefaultStmt>(Label));
- Out << "\\ldefault:";
- }
- }
- else
- Out << "\\l(implicit) default:";
- }
- else if (isa<IndirectGotoStmt>(T)) {
- // FIXME
- }
- else {
- Out << "\\lCondition: ";
- if (*E.getSrc()->succ_begin() == E.getDst())
- Out << "true";
- else
- Out << "false";
- }
-
- Out << "\\l";
- }
+ /// \p PreCallback: callback before break.
+ /// \p PostCallback: callback after break.
+  /// \p Stop: stop iteration if it returns \c true.
+  /// \return Whether \c Stop ever returned \c true.
+ static bool traverseHiddenNodes(
+ const ExplodedNode *N,
+ llvm::function_ref<void(const ExplodedNode *)> PreCallback,
+ llvm::function_ref<void(const ExplodedNode *)> PostCallback,
+ llvm::function_ref<bool(const ExplodedNode *)> Stop) {
+ const ExplodedNode *FirstHiddenNode = N;
+ while (FirstHiddenNode->pred_size() == 1 &&
+ isNodeHidden(*FirstHiddenNode->pred_begin())) {
+ FirstHiddenNode = *FirstHiddenNode->pred_begin();
+ }
+ const ExplodedNode *OtherNode = FirstHiddenNode;
+ while (true) {
+ PreCallback(OtherNode);
+ if (Stop(OtherNode))
+ return true;
+ if (OtherNode == N)
break;
- }
+ PostCallback(OtherNode);
- default: {
- const Stmt *S = Loc.castAs<StmtPoint>().getStmt();
- assert(S != nullptr && "Expecting non-null Stmt");
-
- Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
- LangOptions LO; // FIXME.
- S->printPretty(Out, nullptr, PrintingPolicy(LO));
- printLocation(Out, S->getLocStart());
-
- if (Loc.getAs<PreStmt>())
- Out << "\\lPreStmt\\l;";
- else if (Loc.getAs<PostLoad>())
- Out << "\\lPostLoad\\l;";
- else if (Loc.getAs<PostStore>())
- Out << "\\lPostStore\\l";
- else if (Loc.getAs<PostLValue>())
- Out << "\\lPostLValue\\l";
- else if (Loc.getAs<PostAllocatorCall>())
- Out << "\\lPostAllocatorCall\\l";
+ OtherNode = *OtherNode->succ_begin();
+ }
+ return false;
+ }
- break;
- }
+ static std::string getNodeAttributes(const ExplodedNode *N,
+ ExplodedGraph *) {
+ SmallVector<StringRef, 10> Out;
+ auto Noop = [](const ExplodedNode*){};
+ if (traverseHiddenNodes(N, Noop, Noop, &nodeHasBugReport)) {
+ Out.push_back("style=filled");
+ Out.push_back("fillcolor=red");
}
- ProgramStateRef state = N->getState();
- Out << "\\|StateID: " << (const void*) state.get()
- << " NodeID: " << (const void*) N << "\\|";
+ if (traverseHiddenNodes(N, Noop, Noop,
+ [](const ExplodedNode *C) { return C->isSink(); }))
+ Out.push_back("color=blue");
+ return llvm::join(Out, ",");
+ }
- state->printDOT(Out, N->getLocationContext());
+ static bool isNodeHidden(const ExplodedNode *N) {
+ return N->isTrivial();
+ }
- Out << "\\l";
+ static std::string getNodeLabel(const ExplodedNode *N, ExplodedGraph *G){
+ std::string sbuf;
+ llvm::raw_string_ostream Out(sbuf);
- if (const ProgramPointTag *tag = Loc.getTag()) {
- Out << "\\|Tag: " << tag->getTagDescription();
- Out << "\\l";
- }
+ ProgramStateRef State = N->getState();
+
+ // Dump program point for all the previously skipped nodes.
+ traverseHiddenNodes(
+ N,
+ [&](const ExplodedNode *OtherNode) {
+ OtherNode->getLocation().print(/*CR=*/"\\l", Out);
+ if (const ProgramPointTag *Tag = OtherNode->getLocation().getTag())
+ Out << "\\lTag:" << Tag->getTagDescription();
+ if (N->isSink())
+ Out << "\\lNode is sink\\l";
+ if (nodeHasBugReport(N))
+ Out << "\\lBug report attached\\l";
+ },
+ [&](const ExplodedNode *) { Out << "\\l--------\\l"; },
+ [&](const ExplodedNode *) { return false; });
+
+ Out << "\\l\\|";
+
+ Out << "StateID: ST" << State->getID() << ", NodeID: N" << N->getID(G)
+ << " <" << (const void *)N << ">\\|";
+
+ bool SameAsAllPredecessors =
+ std::all_of(N->pred_begin(), N->pred_end(), [&](const ExplodedNode *P) {
+ return P->getState() == State;
+ });
+ if (!SameAsAllPredecessors)
+ State->printDOT(Out, N->getLocationContext());
return Out.str();
}
};
@@ -3141,48 +3055,61 @@ struct DOTGraphTraits<ExplodedNode*> : public DefaultDOTGraphTraits {
void ExprEngine::ViewGraph(bool trim) {
#ifndef NDEBUG
+ std::string Filename = DumpGraph(trim);
+ llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
+#endif
+ llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
+}
+
+
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
+#ifndef NDEBUG
+ std::string Filename = DumpGraph(Nodes);
+ llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
+#endif
+ llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
+}
+
+std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
+#ifndef NDEBUG
if (trim) {
std::vector<const ExplodedNode *> Src;
- // Flush any outstanding reports to make sure we cover all the nodes.
- // This does not cause them to get displayed.
- for (const auto I : BR)
- const_cast<BugType *>(I)->FlushReports(BR);
-
// Iterate through the reports and get their nodes.
for (BugReporter::EQClasses_iterator
EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
const auto *N = const_cast<ExplodedNode *>(EI->begin()->getErrorNode());
if (N) Src.push_back(N);
}
-
- ViewGraph(Src);
- }
- else {
- GraphPrintCheckerState = this;
- GraphPrintSourceManager = &getContext().getSourceManager();
-
- llvm::ViewGraph(*G.roots_begin(), "ExprEngine");
-
- GraphPrintCheckerState = nullptr;
- GraphPrintSourceManager = nullptr;
+ return DumpGraph(Src, Filename);
+ } else {
+ return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
+ /*Title=*/"Exploded Graph", /*Filename=*/Filename);
}
#endif
+ llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
+ return "";
}
-void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
+std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
+ StringRef Filename) {
#ifndef NDEBUG
- GraphPrintCheckerState = this;
- GraphPrintSourceManager = &getContext().getSourceManager();
-
std::unique_ptr<ExplodedGraph> TrimmedG(G.trim(Nodes));
- if (!TrimmedG.get())
+ if (!TrimmedG.get()) {
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
- else
- llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedExprEngine");
-
- GraphPrintCheckerState = nullptr;
- GraphPrintSourceManager = nullptr;
+ } else {
+ return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
+ /*ShortNames=*/false,
+ /*Title=*/"Trimmed Exploded Graph",
+ /*Filename=*/Filename);
+ }
#endif
+ llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
+ return "";
+}
+
+void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
+ static int index = 0;
+ return &index;
}
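
The rewritten DOTGraphTraits above folds runs of "trivial" nodes (one predecessor, one successor, unchanged state) into a single vertex via isNodeHidden() and traverseHiddenNodes(). A compact standalone sketch of the same folding over a hypothetical Node type, not the real ExplodedNode API:

#include <cstdio>
#include <vector>

// Hypothetical graph node; stateID stands in for the program-state ID.
struct Node {
  int id, stateID;
  std::vector<Node *> preds, succs;
};

// Hidden ("trivial") nodes neither branch nor change the state relative to
// their single predecessor, so drawing them adds no information.
bool isHidden(const Node *N) {
  return N->preds.size() == 1 && N->succs.size() == 1 &&
         N->preds[0]->succs.size() == 1 &&
         N->preds[0]->stateID == N->stateID;
}

// Walk back over the hidden ancestors of N and print the whole collapsed
// chain as one label, the way the patched printer emits one DOT vertex.
void printCollapsedLabel(const Node *N) {
  const Node *First = N;
  while (First->preds.size() == 1 && isHidden(First->preds[0]))
    First = First->preds[0];
  for (const Node *Cur = First;; Cur = Cur->succs[0]) {
    std::printf("N%d (state %d)\n", Cur->id, Cur->stateID);
    if (Cur == N)
      break;
    std::printf("--------\n");
  }
}

int main() {
  Node A{0, 7, {}, {}}, B{1, 7, {}, {}}, C{2, 8, {}, {}};
  A.succs = {&B}; B.preds = {&A}; B.succs = {&C}; C.preds = {&B};
  printCollapsedLabel(&C); // B is trivial, so B and C share one label
}
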
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 61b7a290e42a..b980628878e9 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -412,10 +412,11 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
- case CK_ZeroToOCLEvent:
- case CK_ZeroToOCLQueue:
+ case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
- case CK_LValueBitCast: {
+ case CK_LValueBitCast:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean: {
state =
handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
continue;
@@ -809,8 +810,9 @@ void ExprEngine::
VisitOffsetOfExpr(const OffsetOfExpr *OOE,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
- APSInt IV;
- if (OOE->EvaluateAsInt(IV, getContext())) {
+ Expr::EvalResult Result;
+ if (OOE->EvaluateAsInt(Result, getContext())) {
+ APSInt IV = Result.Val.getInt();
assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
assert(OOE->getType()->isBuiltinType());
assert(OOE->getType()->getAs<BuiltinType>()->isInteger());
@@ -956,7 +958,7 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
}
case UO_Plus:
assert(!U->isGLValue());
- // FALL-THROUGH.
+ LLVM_FALLTHROUGH;
case UO_Deref:
case UO_Extension: {
handleUOExtension(I, U, Bldr);
@@ -1050,7 +1052,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// Perform the store, so that the uninitialized value detection happens.
Bldr.takeNodes(*I);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, U, *I, state, loc, V2_untested);
+ evalStore(Dst3, U, Ex, *I, state, loc, V2_untested);
Bldr.addNodes(Dst3);
continue;
@@ -1118,7 +1120,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
// Perform the store.
Bldr.takeNodes(*I);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, U, *I, state, loc, Result);
+ evalStore(Dst3, U, Ex, *I, state, loc, Result);
Bldr.addNodes(Dst3);
}
Dst.insert(Dst2);
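
Two "// FALL-THROUGH" comments above become LLVM_FALLTHROUGH, which expands to the [[fallthrough]] attribute (or a compiler-specific equivalent) where available; unlike a comment, the attribute is understood by -Wimplicit-fallthrough, so intentional fall-through stays silent while a forgotten break still warns. A minimal C++17 example of the same annotation, standalone rather than the analyzer's code:

#include <cstdio>

void handleUnary(int Opcode) {
  switch (Opcode) {
  case 0: // e.g. unary plus: nothing to do beyond the shared handling
    std::puts("unary plus");
    [[fallthrough]]; // intentional: keeps -Wimplicit-fallthrough quiet
  case 1: // e.g. dereference
    std::puts("shared handling");
    break;
  default:
    std::puts("other opcode");
    break;
  }
}

int main() { handleUnary(0); }
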
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 4f1766a813c6..6445b9df5a58 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -113,7 +113,9 @@ SVal ExprEngine::makeZeroElementRegion(ProgramStateRef State, SVal LValue,
std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
const ConstructionContext *CC, EvalCallOptions &CallOpts) {
- MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
+ SValBuilder &SVB = getSValBuilder();
+ MemRegionManager &MRMgr = SVB.getRegionManager();
+ ASTContext &ACtx = SVB.getContext();
// See if we're constructing an existing region by looking at the
// current construction context.
@@ -139,7 +141,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
assert(Init->isAnyMemberInitializer());
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr =
- getSValBuilder().getCXXThis(CurCtor, LCtx->getStackFrame());
+ SVB.getCXXThis(CurCtor, LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
const ValueDecl *Field;
@@ -159,7 +161,7 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
return std::make_pair(State, FieldVal);
}
case ConstructionContext::NewAllocatedObjectKind: {
- if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
const auto *NECC = cast<NewAllocatedObjectConstructionContext>(CC);
const auto *NE = NECC->getCXXNewExpr();
SVal V = *getObjectUnderConstruction(State, NE, LCtx);
@@ -199,18 +201,31 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
RTC->getConstructionContext(), CallOpts);
} else {
- // We are on the top frame of the analysis.
- // TODO: What exactly happens when we are? Does the temporary object
- // live long enough in the region store in this case? Would checkers
- // think that this object immediately goes out of scope?
- CallOpts.IsTemporaryCtorOrDtor = true;
- SVal V = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
+      // We are on the top frame of the analysis. We do not know where the
+      // object is returned to. Conjure a symbolic region for the return value.
+      // TODO: We probably need a new MemRegion kind to represent the storage
+      // of that SymbolicRegion, so that we could produce a fancy symbol
+ // instead of an anonymous conjured symbol.
+ // TODO: Do we need to track the region to avoid having it dead
+ // too early? It does die too early, at least in C++17, but because
+ // putting anything into a SymbolicRegion causes an immediate escape,
+ // it doesn't cause any leak false positives.
+ const auto *RCC = cast<ReturnedValueConstructionContext>(CC);
+ // Make sure that this doesn't coincide with any other symbol
+ // conjured for the returned expression.
+ static const int TopLevelSymRegionTag = 0;
+ const Expr *RetE = RCC->getReturnStmt()->getRetValue();
+ assert(RetE && "Void returns should not have a construction context");
+ QualType ReturnTy = RetE->getType();
+ QualType RegionTy = ACtx.getPointerType(ReturnTy);
+ SVal V = SVB.conjureSymbolVal(&TopLevelSymRegionTag, RetE, SFC,
+ RegionTy, currBldrCtx->blockCount());
return std::make_pair(State, V);
}
llvm_unreachable("Unhandled return value construction context!");
}
case ConstructionContext::ElidedTemporaryObjectKind: {
- assert(AMgr.getAnalyzerOptions().shouldElideConstructors());
+ assert(AMgr.getAnalyzerOptions().ShouldElideConstructors);
const auto *TCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
const CXXBindTemporaryExpr *BTE = TCC->getCXXBindTemporaryExpr();
const MaterializeTemporaryExpr *MTE = TCC->getMaterializedTemporaryExpr();
@@ -292,8 +307,75 @@ std::pair<ProgramStateRef, SVal> ExprEngine::prepareForObjectConstruction(
return std::make_pair(State, V);
}
case ConstructionContext::ArgumentKind: {
- // Function argument constructors. Not implemented yet.
- break;
+ // Arguments are technically temporaries.
+ CallOpts.IsTemporaryCtorOrDtor = true;
+
+ const auto *ACC = cast<ArgumentConstructionContext>(CC);
+ const Expr *E = ACC->getCallLikeExpr();
+ unsigned Idx = ACC->getIndex();
+ const CXXBindTemporaryExpr *BTE = ACC->getCXXBindTemporaryExpr();
+
+ CallEventManager &CEMgr = getStateManager().getCallEventManager();
+ SVal V = UnknownVal();
+ auto getArgLoc = [&](CallEventRef<> Caller) -> Optional<SVal> {
+ const LocationContext *FutureSFC = Caller->getCalleeStackFrame();
+ // Return early if we are unable to reliably foresee
+ // the future stack frame.
+ if (!FutureSFC)
+ return None;
+
+ // This should be equivalent to Caller->getDecl() for now, but
+ // FutureSFC->getDecl() is likely to support better stuff (like
+ // virtual functions) earlier.
+ const Decl *CalleeD = FutureSFC->getDecl();
+
+ // FIXME: Support for variadic arguments is not implemented here yet.
+ if (CallEvent::isVariadic(CalleeD))
+ return None;
+
+ // Operator arguments do not correspond to operator parameters
+ // because this-argument is implemented as a normal argument in
+ // operator call expressions but not in operator declarations.
+ const VarRegion *VR = Caller->getParameterLocation(
+ *Caller->getAdjustedParameterIndex(Idx));
+ if (!VR)
+ return None;
+
+ return loc::MemRegionVal(VR);
+ };
+
+ if (const auto *CE = dyn_cast<CallExpr>(E)) {
+ CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
+ if (auto OptV = getArgLoc(Caller))
+ V = *OptV;
+ else
+ break;
+ State = addObjectUnderConstruction(State, {CE, Idx}, LCtx, V);
+ } else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
+ // Don't bother figuring out the target region for the future
+ // constructor because we won't need it.
+ CallEventRef<> Caller =
+ CEMgr.getCXXConstructorCall(CCE, /*Target=*/nullptr, State, LCtx);
+ if (auto OptV = getArgLoc(Caller))
+ V = *OptV;
+ else
+ break;
+ State = addObjectUnderConstruction(State, {CCE, Idx}, LCtx, V);
+ } else if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ CallEventRef<> Caller = CEMgr.getObjCMethodCall(ME, State, LCtx);
+ if (auto OptV = getArgLoc(Caller))
+ V = *OptV;
+ else
+ break;
+ State = addObjectUnderConstruction(State, {ME, Idx}, LCtx, V);
+ }
+
+ assert(!V.isUnknown());
+
+ if (BTE)
+ State = addObjectUnderConstruction(State, BTE, LCtx, V);
+
+ return std::make_pair(State, V);
}
}
}
@@ -359,7 +441,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
}
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_NonVirtualBase:
// In C++17, classes with non-virtual bases may be aggregates, so they would
// be initialized as aggregates without a constructor call, so we may have
@@ -378,7 +460,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CXXConstructExpr::CK_Delegating: {
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
@@ -502,8 +584,15 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
}
}
+ ExplodedNodeSet DstPostArgumentCleanup;
+ for (auto I : DstEvaluated)
+ finishArgumentConstruction(DstPostArgumentCleanup, I, *Call);
+
+ // If there were other constructors called for object-type arguments
+ // of this constructor, clean them up.
ExplodedNodeSet DstPostCall;
- getCheckerManager().runCheckersForPostCall(DstPostCall, DstEvaluated,
+ getCheckerManager().runCheckersForPostCall(DstPostCall,
+ DstPostArgumentCleanup,
*Call, *this);
getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
}
@@ -551,7 +640,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
ProgramStateRef State = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
- CNE->getStartLoc(),
+ CNE->getBeginLoc(),
"Error evaluating New Allocator Call");
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
@@ -632,7 +721,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
ProgramStateRef State = Pred->getState();
// Retrieve the stored operator new() return value.
- if (AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
symVal = *getObjectUnderConstruction(State, CNE, LCtx);
State = finishObjectConstruction(State, CNE, LCtx);
}
@@ -652,7 +741,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
CallEventRef<CXXAllocatorCall> Call =
CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
- if (!AMgr.getAnalyzerOptions().mayInlineCXXAllocator()) {
+ if (!AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
// Invalidate placement args.
// FIXME: Once we figure out how we want allocators to work,
// we should be using the usual pre-/(default-)eval-/post-call checks here.
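
The new ArgumentKind handling above maps a call-expression argument index onto the callee's parameter slot through getAdjustedParameterIndex(), because for an overloaded operator called with member syntax argument 0 is the object itself and has no counterpart in the parameter list. A simplified sketch of that adjustment, with a hypothetical call description standing in for the real CallEvent:

#include <cassert>
#include <cstdio>

// Hypothetical call-site description: for a member overloaded operator,
// argument 0 of the call expression is the implicit object argument.
struct CallDescription {
  bool MemberOperatorCall;
  unsigned NumArgs;
};

// Map an argument index to the callee parameter it initializes; return
// false when the argument is the implicit object and has no parameter.
bool adjustedParameterIndex(const CallDescription &Call, unsigned ArgIdx,
                            unsigned &ParamIdx) {
  assert(ArgIdx < Call.NumArgs);
  if (!Call.MemberOperatorCall) {
    ParamIdx = ArgIdx;
    return true;
  }
  if (ArgIdx == 0)
    return false;        // the object argument has no matching parameter
  ParamIdx = ArgIdx - 1; // remaining arguments shift down by one
  return true;
}

int main() {
  CallDescription OpCall{/*MemberOperatorCall=*/true, /*NumArgs=*/3};
  unsigned P;
  if (adjustedParameterIndex(OpCall, 2, P))
    std::printf("argument 2 initializes parameter %u\n", P); // parameter 1
}
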
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 3ee67f3d6882..758195d8d911 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -349,7 +349,7 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
/*WasInlined=*/true);
} else if (CE &&
!(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
- AMgr.getAnalyzerOptions().mayInlineCXXAllocator())) {
+ AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
*this, /*WasInlined=*/true);
} else {
@@ -386,7 +386,7 @@ void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
// Do not count the small functions when determining the stack depth.
AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
const CFG *CalleeCFG = CalleeADC->getCFG();
- if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+ if (CalleeCFG->getNumBlockIDs() > AMgr.options.AlwaysInlineSize)
++StackDepth;
}
LCtx = LCtx->getParent();
@@ -406,9 +406,8 @@ namespace {
};
} // end anonymous namespace
-REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
- CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
- unsigned))
+REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
+ const MemRegion *, unsigned)
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
NodeBuilder &Bldr, ExplodedNode *Pred,
@@ -505,6 +504,50 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
*this);
}
+ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
+ const CallEvent &Call) {
+ const Expr *E = Call.getOriginExpr();
+ // FIXME: Constructors to placement arguments of operator new
+ // are not supported yet.
+ if (!E || isa<CXXNewExpr>(E))
+ return State;
+
+ const LocationContext *LC = Call.getLocationContext();
+ for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
+ unsigned I = Call.getASTArgumentIndex(CallI);
+ if (Optional<SVal> V =
+ getObjectUnderConstruction(State, {E, I}, LC)) {
+ SVal VV = *V;
+ (void)VV;
+ assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
+ ->getStackFrame()->getParent()
+ ->getStackFrame() == LC->getStackFrame());
+ State = finishObjectConstruction(State, {E, I}, LC);
+ }
+ }
+
+ return State;
+}
+
+void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ const CallEvent &Call) {
+ ProgramStateRef State = Pred->getState();
+ ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
+ if (CleanedState == State) {
+ Dst.insert(Pred);
+ return;
+ }
+
+ const Expr *E = Call.getOriginExpr();
+ const LocationContext *LC = Call.getLocationContext();
+ NodeBuilder B(Pred, Dst, *currBldrCtx);
+ static SimpleProgramPointTag Tag("ExprEngine",
+ "Finish argument construction");
+ PreStmt PP(E, LC, &Tag);
+ B.generateNode(PP, CleanedState, Pred);
+}
+
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
const CallEvent &Call) {
// WARNING: At this time, the state attached to 'Call' may be older than the
@@ -516,7 +559,8 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run any pre-call checks using the generic call interface.
ExplodedNodeSet dstPreVisit;
- getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
+ Call, *this);
// Actually evaluate the function call. We try each of the checkers
// to see if the can evaluate the function call, and get a callback at
@@ -525,8 +569,14 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
Call, *this);
+ // If there were other constructors called for object-type arguments
+ // of this call, clean them up.
+ ExplodedNodeSet dstArgumentCleanup;
+ for (auto I : dstCallEvaluated)
+ finishArgumentConstruction(dstArgumentCleanup, I, Call);
+
// Finally, run any post-call checks.
- getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
+ getCheckerManager().runCheckersForPostCall(Dst, dstArgumentCleanup,
Call, *this);
}
@@ -633,7 +683,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
: nullptr;
if (CC && isa<NewAllocatedObjectConstructionContext>(CC) &&
- !Opts.mayInlineCXXAllocator())
+ !Opts.MayInlineCXXAllocator)
return CIP_DisallowedOnce;
// FIXME: We don't handle constructors or destructors for arrays properly.
@@ -662,7 +712,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
// If we don't handle temporary destructors, we shouldn't inline
// their constructors.
if (CallOpts.IsTemporaryCtorOrDtor &&
- !Opts.includeTemporaryDtorsInCFG())
+ !Opts.ShouldIncludeTemporaryDtorsInCFG)
return CIP_DisallowedOnce;
// If we did not find the correct this-region, it would be pointless
@@ -693,7 +743,8 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
return CIP_DisallowedOnce;
// Allow disabling temporary destructor inlining with a separate option.
- if (CallOpts.IsTemporaryCtorOrDtor && !Opts.mayInlineCXXTemporaryDtors())
+ if (CallOpts.IsTemporaryCtorOrDtor &&
+ !Opts.MayInlineCXXTemporaryDtors)
return CIP_DisallowedOnce;
// If we did not find the correct this-region, it would be pointless
@@ -704,13 +755,13 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
break;
}
case CE_CXXAllocator:
- if (Opts.mayInlineCXXAllocator())
+ if (Opts.MayInlineCXXAllocator)
break;
// Do not inline allocators until we model deallocators.
// This is unfortunate, but basically necessary for smart pointers and such.
return CIP_DisallowedAlways;
case CE_ObjCMessage:
- if (!Opts.mayInlineObjCMethod())
+ if (!Opts.MayInlineObjCMethod)
return CIP_DisallowedAlways;
if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
@@ -794,19 +845,19 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
if (Ctx.getLangOpts().CPlusPlus) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
// Conditionally control the inlining of template functions.
- if (!Opts.mayInlineTemplateFunctions())
+ if (!Opts.MayInlineTemplateFunctions)
if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
return false;
// Conditionally control the inlining of C++ standard library functions.
- if (!Opts.mayInlineCXXStandardLibrary())
+ if (!Opts.MayInlineCXXStandardLibrary)
if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
if (AnalysisDeclContext::isInStdNamespace(FD))
return false;
// Conditionally control the inlining of methods on objects that look
// like C++ containers.
- if (!Opts.mayInlineCXXContainerMethods())
+ if (!Opts.MayInlineCXXContainerMethods)
if (!AMgr.isInCodeFile(FD->getLocation()))
if (isContainerMethod(Ctx, FD))
return false;
@@ -815,7 +866,7 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
// We don't currently do a good job modeling shared_ptr because we can't
// see the reference count, so treating as opaque is probably the best
// idea.
- if (!Opts.mayInlineCXXSharedPtrDtor())
+ if (!Opts.MayInlineCXXSharedPtrDtor)
if (isCXXSharedPtrDtor(FD))
return false;
}
@@ -828,7 +879,7 @@ static bool mayInlineDecl(AnalysisManager &AMgr,
return false;
// Do not inline large functions.
- if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+ if (CalleeCFG->getNumBlockIDs() > Opts.MaxInlinableSize)
return false;
// It is possible that the live variables analysis cannot be
@@ -896,21 +947,21 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
unsigned StackDepth = 0;
examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
if ((StackDepth >= Opts.InlineMaxStackDepth) &&
- ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
+ ((CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize)
|| IsRecursive))
return false;
// Do not inline large functions too many times.
if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
- Opts.getMaxTimesInlineLarge()) &&
+ Opts.MaxTimesInlineLarge) &&
CalleeCFG->getNumBlockIDs() >=
- Opts.getMinCFGSizeTreatFunctionsAsLarge()) {
+ Opts.MinCFGSizeTreatFunctionsAsLarge) {
NumReachedInlineCountMax++;
return false;
}
if (HowToInline == Inline_Minimal &&
- (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
+ (CalleeCFG->getNumBlockIDs() > Opts.AlwaysInlineSize
|| IsRecursive))
return false;
diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index d76b9cbcfaca..6b8402f621e0 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -129,7 +129,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
bool isContainerNull = state->isNull(collectionV).isConstrainedTrue();
ExplodedNodeSet dstLocation;
- evalLocation(dstLocation, S, elem, Pred, state, elementV, nullptr, false);
+ evalLocation(dstLocation, S, elem, Pred, state, elementV, false);
ExplodedNodeSet Tmp;
StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
@@ -197,7 +197,8 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
// Receiver is definitely nil, so run ObjCMessageNil callbacks and return.
if (nilState && !notNilState) {
- StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ ExplodedNodeSet dstNil;
+ StmtNodeBuilder Bldr(Pred, dstNil, *currBldrCtx);
bool HasTag = Pred->getLocation().getTag();
Pred = Bldr.generateNode(ME, Pred, nilState, nullptr,
ProgramPoint::PreStmtKind);
@@ -205,8 +206,12 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
(void)HasTag;
if (!Pred)
return;
- getCheckerManager().runCheckersForObjCMessageNil(Dst, Pred,
+
+ ExplodedNodeSet dstPostCheckers;
+ getCheckerManager().runCheckersForObjCMessageNil(dstPostCheckers, Pred,
*Msg, *this);
+ for (auto I : dstPostCheckers)
+ finishArgumentConstruction(Dst, I, *Msg);
return;
}
@@ -267,8 +272,13 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
defaultEvalCall(Bldr, Pred, *UpdatedMsg);
}
+ // If there were constructors called for object-type arguments, clean them up.
+ ExplodedNodeSet dstArgCleanup;
+ for (auto I : dstEval)
+ finishArgumentConstruction(dstArgCleanup, I, *Msg);
+
ExplodedNodeSet dstPostvisit;
- getCheckerManager().runCheckersForPostCall(dstPostvisit, dstEval,
+ getCheckerManager().runCheckersForPostCall(dstPostvisit, dstArgCleanup,
*Msg, *this);
// Finally, perform the post-condition check of the ObjCMessageExpr and store
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index d5e5f96dee0f..fc82f1176942 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -112,15 +112,24 @@ public:
FileID FID, const FileEntry *Entry, const char *declName);
// Rewrite the file specified by FID with HTML formatting.
- void RewriteFile(Rewriter &R, const SourceManager& SMgr,
- const PathPieces& path, FileID FID);
+ void RewriteFile(Rewriter &R, const PathPieces& path, FileID FID);
- /// \return Javascript for navigating the HTML report using j/k keys.
- std::string generateKeyboardNavigationJavascript();
private:
/// \return Javascript for displaying shortcuts help;
- std::string showHelpJavascript();
+ StringRef showHelpJavascript();
+
+ /// \return Javascript for navigating the HTML report using j/k keys.
+ StringRef generateKeyboardNavigationJavascript();
+
+ /// \return JavaScript for an option to only show relevant lines.
+ std::string showRelevantLinesJavascript(
+ const PathDiagnostic &D, const PathPieces &path);
+
+ /// Write executed lines from \p D in JSON format into \p os.
+ void dumpCoverageData(const PathDiagnostic &D,
+ const PathPieces &path,
+ llvm::raw_string_ostream &os);
};
} // namespace
@@ -194,7 +203,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
FullSourceLoc L(
SMgr.getExpansionLoc(path.back()->getLocation().asLocation()),
SMgr);
- FullSourceLoc FunL(SMgr.getExpansionLoc(Body->getLocStart()), SMgr);
+ FullSourceLoc FunL(SMgr.getExpansionLoc(Body->getBeginLoc()), SMgr);
offsetDecl = L.getExpansionLineNumber() - FunL.getExpansionLineNumber();
}
}
@@ -209,7 +218,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
int FD;
SmallString<128> Model, ResultPath;
- if (!AnalyzerOpts.shouldWriteStableReportFilename()) {
+ if (!AnalyzerOpts.ShouldWriteStableReportFilename) {
llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
if (std::error_code EC =
llvm::sys::fs::make_absolute(Model)) {
@@ -269,7 +278,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
continue;
FileIDs.push_back(FID);
- RewriteFile(R, SMgr, path, FID);
+ RewriteFile(R, path, FID);
}
if (SupportsCrossFileDiagnostics && FileIDs.size() > 1) {
@@ -332,28 +341,12 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
return os.str();
}
-/// Write executed lines from \p D in JSON format into \p os.
-static void serializeExecutedLines(
+void HTMLDiagnostics::dumpCoverageData(
const PathDiagnostic &D,
const PathPieces &path,
llvm::raw_string_ostream &os) {
- // Copy executed lines from path diagnostics.
- std::map<unsigned, std::set<unsigned>> ExecutedLines;
- for (auto I = D.executedLines_begin(),
- E = D.executedLines_end(); I != E; ++I) {
- std::set<unsigned> &LinesInFile = ExecutedLines[I->first];
- for (unsigned LineNo : I->second) {
- LinesInFile.insert(LineNo);
- }
- }
- // We need to include all lines for which any kind of diagnostics appears.
- for (const auto &P : path) {
- FullSourceLoc Loc = P->getLocation().asLocation().getExpansionLoc();
- FileID FID = Loc.getFileID();
- unsigned LineNo = Loc.getLineNumber();
- ExecutedLines[FID.getHashValue()].insert(LineNo);
- }
+ const FilesToLineNumsMap &ExecutedLines = D.getExecutedLines();
os << "var relevant_lines = {";
for (auto I = ExecutedLines.begin(),
@@ -361,7 +354,7 @@ static void serializeExecutedLines(
if (I != ExecutedLines.begin())
os << ", ";
- os << "\"" << I->first << "\": {";
+ os << "\"" << I->first.getHashValue() << "\": {";
for (unsigned LineNo : I->second) {
if (LineNo != *(I->second.begin()))
os << ", ";
@@ -374,13 +367,12 @@ static void serializeExecutedLines(
os << "};";
}
-/// \return JavaScript for an option to only show relevant lines.
-static std::string showRelevantLinesJavascript(
+std::string HTMLDiagnostics::showRelevantLinesJavascript(
const PathDiagnostic &D, const PathPieces &path) {
std::string s;
llvm::raw_string_ostream os(s);
os << "<script type='text/javascript'>\n";
- serializeExecutedLines(D, path, os);
+ dumpCoverageData(D, path, os);
os << R"<<<(
var filterCounterexample = function (hide) {
@@ -586,7 +578,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
}
-std::string HTMLDiagnostics::showHelpJavascript() {
+StringRef HTMLDiagnostics::showHelpJavascript() {
return R"<<<(
<script type='text/javascript'>
@@ -614,8 +606,8 @@ window.addEventListener("keydown", function (event) {
)<<<";
}
-void HTMLDiagnostics::RewriteFile(Rewriter &R, const SourceManager& SMgr,
- const PathPieces& path, FileID FID) {
+void HTMLDiagnostics::RewriteFile(Rewriter &R,
+ const PathPieces& path, FileID FID) {
// Process the path.
// Maintain the counts of extra note pieces separately.
unsigned TotalPieces = path.size();
@@ -944,7 +936,7 @@ void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
}
-std::string HTMLDiagnostics::generateKeyboardNavigationJavascript() {
+StringRef HTMLDiagnostics::generateKeyboardNavigationJavascript() {
return R"<<<(
<script type='text/javascript'>
var digitMatcher = new RegExp("[0-9]+");
@@ -997,7 +989,8 @@ var numToId = function(num) {
};
var navigateTo = function(up) {
- var numItems = document.querySelectorAll(".line > .msg").length;
+ var numItems = document.querySelectorAll(
+ ".line > .msgEvent, .line > .msgControl").length;
var currentSelected = findNum();
var newSelected = move(currentSelected, up, numItems);
var newEl = numToId(newSelected, numItems);
diff --git a/lib/StaticAnalyzer/Core/LoopWidening.cpp b/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 9192f49eac6d..8f6cb9a6b09e 100644
--- a/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -81,11 +81,12 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
// 'this' pointer is not an lvalue, we should not invalidate it. If the loop
// is located in a method, constructor or destructor, the value of 'this'
- // pointer shoule remain unchanged.
- if (const CXXMethodDecl *CXXMD = dyn_cast<CXXMethodDecl>(STC->getDecl())) {
- const CXXThisRegion *ThisR = MRMgr.getCXXThisRegion(
- CXXMD->getThisType(STC->getAnalysisDeclContext()->getASTContext()),
- STC);
+ // pointer should remain unchanged. Ignore static methods, since they do not
+ // have 'this' pointers.
+ const CXXMethodDecl *CXXMD = dyn_cast<CXXMethodDecl>(STC->getDecl());
+ if (CXXMD && !CXXMD->isStatic()) {
+ const CXXThisRegion *ThisR =
+ MRMgr.getCXXThisRegion(CXXMD->getThisType(), STC);
ITraits.setTrait(ThisR,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
}
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index cb2122c7749e..9a1d4d73c20b 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -225,6 +225,10 @@ QualType CXXBaseObjectRegion::getValueType() const {
return QualType(getDecl()->getTypeForDecl(), 0);
}
+QualType CXXDerivedObjectRegion::getValueType() const {
+ return QualType(getDecl()->getTypeForDecl(), 0);
+}
+
//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
@@ -404,6 +408,17 @@ void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ProfileRegion(ID, getDecl(), isVirtual(), superRegion);
}
+void CXXDerivedObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *RD,
+ const MemRegion *SReg) {
+ ID.AddPointer(RD);
+ ID.AddPointer(SReg);
+}
+
+void CXXDerivedObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, getDecl(), superRegion);
+}
+
//===----------------------------------------------------------------------===//
// Region anchors.
//===----------------------------------------------------------------------===//
@@ -442,7 +457,7 @@ void MemRegion::dumpToStream(raw_ostream &os) const {
}
void AllocaRegion::dumpToStream(raw_ostream &os) const {
- os << "alloca{" << static_cast<const void *>(Ex) << ',' << Cnt << '}';
+ os << "alloca{S" << Ex->getID(getContext()) << ',' << Cnt << '}';
}
void FunctionCodeRegion::dumpToStream(raw_ostream &os) const {
@@ -466,16 +481,20 @@ void BlockDataRegion::dumpToStream(raw_ostream &os) const {
void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
// FIXME: More elaborate pretty-printing.
- os << "{ " << static_cast<const void *>(CL) << " }";
+ os << "{ S" << CL->getID(getContext()) << " }";
}
void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
- os << "temp_object{" << getValueType().getAsString() << ','
- << static_cast<const void *>(Ex) << '}';
+ os << "temp_object{" << getValueType().getAsString() << ", "
+ << "S" << Ex->getID(getContext()) << '}';
}
void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
- os << "base{" << superRegion << ',' << getDecl()->getName() << '}';
+ os << "Base{" << superRegion << ',' << getDecl()->getName() << '}';
+}
+
+void CXXDerivedObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "Derived{" << superRegion << ',' << getDecl()->getName() << '}';
}
void CXXThisRegion::dumpToStream(raw_ostream &os) const {
@@ -483,7 +502,7 @@ void CXXThisRegion::dumpToStream(raw_ostream &os) const {
}
void ElementRegion::dumpToStream(raw_ostream &os) const {
- os << "element{" << superRegion << ','
+ os << "Element{" << superRegion << ','
<< Index << ',' << getElementType().getAsString() << '}';
}
@@ -492,7 +511,7 @@ void FieldRegion::dumpToStream(raw_ostream &os) const {
}
void ObjCIvarRegion::dumpToStream(raw_ostream &os) const {
- os << "ivar{" << superRegion << ',' << *getDecl() << '}';
+ os << "Ivar{" << superRegion << ',' << *getDecl() << '}';
}
void StringRegion::dumpToStream(raw_ostream &os) const {
@@ -516,7 +535,7 @@ void VarRegion::dumpToStream(raw_ostream &os) const {
if (const IdentifierInfo *ID = VD->getIdentifier())
os << ID->getName();
else
- os << "VarRegion{" << static_cast<const void *>(this) << '}';
+ os << "VarRegion{D" << VD->getID() << '}';
}
LLVM_DUMP_METHOD void RegionRawOffset::dump() const {
@@ -578,7 +597,7 @@ void MemRegion::printPretty(raw_ostream &os) const {
os << "'";
}
-void MemRegion::printPrettyAsExpr(raw_ostream &os) const {
+void MemRegion::printPrettyAsExpr(raw_ostream &) const {
llvm_unreachable("This region cannot be printed pretty.");
}
@@ -630,6 +649,14 @@ void CXXBaseObjectRegion::printPrettyAsExpr(raw_ostream &os) const {
superRegion->printPrettyAsExpr(os);
}
+bool CXXDerivedObjectRegion::canPrintPrettyAsExpr() const {
+ return superRegion->canPrintPrettyAsExpr();
+}
+
+void CXXDerivedObjectRegion::printPrettyAsExpr(raw_ostream &os) const {
+ superRegion->printPrettyAsExpr(os);
+}
+
std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
std::string VariableName;
std::string ArrayIndices;
@@ -1061,6 +1088,12 @@ MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
return getSubRegion<CXXBaseObjectRegion>(RD, IsVirtual, Super);
}
+const CXXDerivedObjectRegion *
+MemRegionManager::getCXXDerivedObjectRegion(const CXXRecordDecl *RD,
+ const SubRegion *Super) {
+ return getSubRegion<CXXDerivedObjectRegion>(RD, Super);
+}
+
const CXXThisRegion*
MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
const LocationContext *LC) {
@@ -1072,9 +1105,8 @@ MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
// FIXME: when operator() of a lambda is analyzed as a top-level function and
// 'this' refers to the 'this' of the enclosing scope, there is no right region
// to return.
- while (!LC->inTopFrame() &&
- (!D || D->isStatic() ||
- PT != D->getThisType(getContext())->getAs<PointerType>())) {
+ while (!LC->inTopFrame() && (!D || D->isStatic() ||
+ PT != D->getThisType()->getAs<PointerType>())) {
LC = LC->getParent();
D = dyn_cast<CXXMethodDecl>(LC->getDecl());
}
@@ -1131,6 +1163,7 @@ const MemRegion *MemRegion::getBaseRegion() const {
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
+ case MemRegion::CXXDerivedObjectRegionKind:
R = cast<SubRegion>(R)->getSuperRegion();
continue;
default:
@@ -1141,7 +1174,16 @@ const MemRegion *MemRegion::getBaseRegion() const {
return R;
}
-bool MemRegion::isSubRegionOf(const MemRegion *R) const {
+// getMostDerivedObjectRegion strips away CXXBaseObjectRegion layers and
+// returns the region of the most derived object in a C++ class hierarchy.
+const MemRegion *MemRegion::getMostDerivedObjectRegion() const {
+ const MemRegion *R = this;
+ while (const auto *BR = dyn_cast<CXXBaseObjectRegion>(R))
+ R = BR->getSuperRegion();
+ return R;
+}
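+
+// Illustration (hypothetical declarations): for 'struct Derived : Base {} d;',
+// the Base subobject of 'd' is modeled as a CXXBaseObjectRegion whose super
+// region is the VarRegion of 'd'; getMostDerivedObjectRegion() called on that
+// base region returns the VarRegion of 'd' itself.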
+
+bool MemRegion::isSubRegionOf(const MemRegion *) const {
return false;
}
@@ -1149,7 +1191,7 @@ bool MemRegion::isSubRegionOf(const MemRegion *R) const {
// View handling.
//===----------------------------------------------------------------------===//
-const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
+const MemRegion *MemRegion::StripCasts(bool StripBaseAndDerivedCasts) const {
const MemRegion *R = this;
while (true) {
switch (R->getKind()) {
@@ -1161,9 +1203,10 @@ const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
break;
}
case CXXBaseObjectRegionKind:
- if (!StripBaseCasts)
+ case CXXDerivedObjectRegionKind:
+ if (!StripBaseAndDerivedCasts)
return R;
- R = cast<CXXBaseObjectRegion>(R)->getSuperRegion();
+ R = cast<TypedValueRegion>(R)->getSuperRegion();
break;
default:
return R;
@@ -1344,6 +1387,12 @@ static RegionOffset calculateOffset(const MemRegion *R) {
Offset += BaseOffset.getQuantity() * R->getContext().getCharWidth();
break;
}
+
+ case MemRegion::CXXDerivedObjectRegionKind: {
+ // TODO: Store the base type in the CXXDerivedObjectRegion and use it.
+ goto Finish;
+ }
+
case MemRegion::ElementRegionKind: {
const auto *ER = cast<ElementRegion>(R);
R = ER->getSuperRegion();
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 1b698ec5c086..3e93bb6a7c4f 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -536,7 +536,7 @@ PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) {
static SourceLocation getValidSourceLocation(const Stmt* S,
LocationOrAnalysisDeclContext LAC,
bool UseEnd = false) {
- SourceLocation L = UseEnd ? S->getLocEnd() : S->getLocStart();
+ SourceLocation L = UseEnd ? S->getEndLoc() : S->getBeginLoc();
assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
"be passed to PathDiagnosticLocation upon creation.");
@@ -562,13 +562,13 @@ static SourceLocation getValidSourceLocation(const Stmt* S,
if (!Parent) {
const Stmt *Body = ADC->getBody();
if (Body)
- L = Body->getLocStart();
+ L = Body->getBeginLoc();
else
- L = ADC->getDecl()->getLocEnd();
+ L = ADC->getDecl()->getEndLoc();
break;
}
- L = UseEnd ? Parent->getLocEnd() : Parent->getLocStart();
+ L = UseEnd ? Parent->getEndLoc() : Parent->getBeginLoc();
} while (!L.isValid());
}
@@ -635,7 +635,7 @@ getLocationForCaller(const StackFrameContext *SFC,
PathDiagnosticLocation
PathDiagnosticLocation::createBegin(const Decl *D,
const SourceManager &SM) {
- return PathDiagnosticLocation(D->getLocStart(), SM, SingleLocK);
+ return PathDiagnosticLocation(D->getBeginLoc(), SM, SingleLocK);
}
PathDiagnosticLocation
@@ -695,7 +695,7 @@ PathDiagnosticLocation::createDeclBegin(const LocationContext *LC,
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
if (const auto *CS = dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
if (!CS->body_empty()) {
- SourceLocation Loc = (*CS->body_begin())->getLocStart();
+ SourceLocation Loc = (*CS->body_begin())->getBeginLoc();
return PathDiagnosticLocation(Loc, SM, SingleLocK);
}
@@ -723,6 +723,8 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
} else if (Optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
return PathDiagnosticLocation(PIP->getInitializer()->getSourceLocation(),
SMng);
+ } else if (Optional<PreImplicitCall> PIC = P.getAs<PreImplicitCall>()) {
+ return PathDiagnosticLocation(PIC->getLocation(), SMng);
} else if (Optional<PostImplicitCall> PIE = P.getAs<PostImplicitCall>()) {
return PathDiagnosticLocation(PIE->getLocation(), SMng);
} else if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
@@ -736,10 +738,10 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
} else if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
CFGElement BlockFront = BE->getBlock()->front();
if (auto StmtElt = BlockFront.getAs<CFGStmt>()) {
- return PathDiagnosticLocation(StmtElt->getStmt()->getLocStart(), SMng);
+ return PathDiagnosticLocation(StmtElt->getStmt()->getBeginLoc(), SMng);
} else if (auto NewAllocElt = BlockFront.getAs<CFGNewAllocator>()) {
return PathDiagnosticLocation(
- NewAllocElt->getAllocatorExpr()->getLocStart(), SMng);
+ NewAllocElt->getAllocatorExpr()->getBeginLoc(), SMng);
}
llvm_unreachable("Unexpected CFG element at front of block");
} else {
@@ -774,18 +776,20 @@ const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
}
// Otherwise, see if the node's program point directly points to a statement.
ProgramPoint P = N->getLocation();
- if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+ if (auto SP = P.getAs<StmtPoint>())
return SP->getStmt();
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>())
+ if (auto BE = P.getAs<BlockEdge>())
return BE->getSrc()->getTerminator();
- if (Optional<CallEnter> CE = P.getAs<CallEnter>())
+ if (auto CE = P.getAs<CallEnter>())
return CE->getCallExpr();
- if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>())
+ if (auto CEE = P.getAs<CallExitEnd>())
return CEE->getCalleeContext()->getCallSite();
- if (Optional<PostInitializer> PIPP = P.getAs<PostInitializer>())
+ if (auto PIPP = P.getAs<PostInitializer>())
return PIPP->getInitializer()->getInit();
- if (Optional<CallExitBegin> CEB = P.getAs<CallExitBegin>())
+ if (auto CEB = P.getAs<CallExitBegin>())
return CEB->getReturnStmt();
+ if (auto FEP = P.getAs<FunctionExitPoint>())
+ return FEP->getStmt();
return nullptr;
}
@@ -822,17 +826,21 @@ PathDiagnosticLocation
const SourceManager &SM) {
assert(N && "Cannot create a location with a null node.");
const Stmt *S = getStmt(N);
+ const LocationContext *LC = N->getLocationContext();
if (!S) {
// If this is an implicit call, return the implicit call point location.
if (Optional<PreImplicitCall> PIE = N->getLocationAs<PreImplicitCall>())
return PathDiagnosticLocation(PIE->getLocation(), SM);
+ if (auto FE = N->getLocationAs<FunctionExitPoint>()) {
+ if (const ReturnStmt *RS = FE->getStmt())
+ return PathDiagnosticLocation::createBegin(RS, SM, LC);
+ }
S = getNextStmt(N);
}
if (S) {
ProgramPoint P = N->getLocation();
- const LocationContext *LC = N->getLocationContext();
// For member expressions, return the location of the '.' or '->'.
if (const auto *ME = dyn_cast<MemberExpr>(S))
@@ -845,7 +853,7 @@ PathDiagnosticLocation
if (P.getAs<PostStmtPurgeDeadSymbols>())
return PathDiagnosticLocation::createEnd(S, SM, LC);
- if (S->getLocStart().isValid())
+ if (S->getBeginLoc().isValid())
return PathDiagnosticLocation(S, SM, LC);
return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM);
}
@@ -904,7 +912,7 @@ PathDiagnosticRange
const auto *DS = cast<DeclStmt>(S);
if (DS->isSingleDecl()) {
// Should always be the case, but we'll be defensive.
- return SourceRange(DS->getLocStart(),
+ return SourceRange(DS->getBeginLoc(),
DS->getSingleDecl()->getLocation());
}
break;
@@ -964,7 +972,7 @@ void PathDiagnosticLocation::flatten() {
//===----------------------------------------------------------------------===//
std::shared_ptr<PathDiagnosticCallPiece>
-PathDiagnosticCallPiece::construct(const ExplodedNode *N, const CallExitEnd &CE,
+PathDiagnosticCallPiece::construct(const CallExitEnd &CE,
const SourceManager &SM) {
const Decl *caller = CE.getLocationContext()->getDecl();
PathDiagnosticLocation pos = getLocationForCaller(CE.getCalleeContext(),
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index cfe780db9ec9..db4cf76578d8 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/TokenConcatenation.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
@@ -24,20 +25,26 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
+
using namespace clang;
using namespace ento;
using namespace markup;
+//===----------------------------------------------------------------------===//
+// Declarations of helper classes and functions for emitting bug reports in
+// plist format.
+//===----------------------------------------------------------------------===//
+
namespace {
class PlistDiagnostics : public PathDiagnosticConsumer {
const std::string OutputFile;
- const LangOptions &LangOpts;
+ const Preprocessor &PP;
+ AnalyzerOptions &AnOpts;
const bool SupportsCrossFileDiagnostics;
- const bool SerializeStatistics;
public:
PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
const std::string& prefix,
- const LangOptions &LangOpts,
+ const Preprocessor &PP,
bool supportsMultipleFiles);
~PlistDiagnostics() override {}
@@ -59,37 +66,116 @@ namespace {
};
} // end anonymous namespace
-PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
- const std::string& output,
- const LangOptions &LO,
- bool supportsMultipleFiles)
- : OutputFile(output),
- LangOpts(LO),
- SupportsCrossFileDiagnostics(supportsMultipleFiles),
- SerializeStatistics(AnalyzerOpts.shouldSerializeStats()) {}
+namespace {
-void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
- PathDiagnosticConsumers &C,
- const std::string& s,
- const Preprocessor &PP) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
- PP.getLangOpts(), false));
-}
+/// A helper class for emitting a single report.
+class PlistPrinter {
+ const FIDMap& FM;
+ AnalyzerOptions &AnOpts;
+ const Preprocessor &PP;
+ llvm::SmallVector<const PathDiagnosticMacroPiece *, 0> MacroPieces;
+
+public:
+ PlistPrinter(const FIDMap& FM, AnalyzerOptions &AnOpts,
+ const Preprocessor &PP)
+ : FM(FM), AnOpts(AnOpts), PP(PP) {
+ }
-void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
- PathDiagnosticConsumers &C,
- const std::string &s,
- const Preprocessor &PP) {
- C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
- PP.getLangOpts(), true));
-}
+ void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P) {
+ ReportPiece(o, P, /*indent*/ 4, /*depth*/ 0, /*includeControlFlow*/ true);
+
+ // Don't emit a warning about an unused private field.
+ (void)AnOpts;
+ }
+
+ /// Print the expansions of the collected macro pieces.
+ ///
+  /// Each time ReportDiag is called on a PathDiagnosticMacroPiece (or if one
+  /// is found through a call piece, etc.), its subpieces are reported, and the
+  /// piece itself is collected. Call this function after the entire bug path
+  /// has been reported.
+ void ReportMacroExpansions(raw_ostream &o, unsigned indent);
+
+private:
+ void ReportPiece(raw_ostream &o, const PathDiagnosticPiece &P,
+ unsigned indent, unsigned depth, bool includeControlFlow,
+ bool isKeyEvent = false) {
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ if (includeControlFlow)
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), indent);
+ break;
+ case PathDiagnosticPiece::Call:
+ ReportCall(o, cast<PathDiagnosticCallPiece>(P), indent,
+ depth);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticEventPiece>(P), indent, depth,
+ isKeyEvent);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacroSubPieces(o, cast<PathDiagnosticMacroPiece>(P), indent,
+ depth);
+ break;
+ case PathDiagnosticPiece::Note:
+ ReportNote(o, cast<PathDiagnosticNotePiece>(P), indent);
+ break;
+ }
+ }
+
+ void EmitRanges(raw_ostream &o, const ArrayRef<SourceRange> Ranges,
+ unsigned indent);
+ void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent);
+
+ void ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ unsigned indent);
+ void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
+ unsigned indent, unsigned depth, bool isKeyEvent = false);
+ void ReportCall(raw_ostream &o, const PathDiagnosticCallPiece &P,
+ unsigned indent, unsigned depth);
+ void ReportMacroSubPieces(raw_ostream &o, const PathDiagnosticMacroPiece& P,
+ unsigned indent, unsigned depth);
+ void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
+ unsigned indent);
+};
+
+} // end of anonymous namespace
-static void EmitRanges(raw_ostream &o,
- const ArrayRef<SourceRange> Ranges,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent) {
+namespace {
+
+struct ExpansionInfo {
+ std::string MacroName;
+ std::string Expansion;
+ ExpansionInfo(std::string N, std::string E)
+ : MacroName(std::move(N)), Expansion(std::move(E)) {}
+};
+
+} // end of anonymous namespace
+
+static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
+ AnalyzerOptions &AnOpts,
+ const Preprocessor &PP,
+ const PathPieces &Path);
+
+/// Print coverage information to output stream {@code o}.
+/// May modify the used list of files {@code Fids} by inserting new ones.
+static void printCoverage(const PathDiagnostic *D,
+ unsigned InputIndentLevel,
+ SmallVectorImpl<FileID> &Fids,
+ FIDMap &FM,
+ llvm::raw_fd_ostream &o);
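+//
+// A rough sketch of the plist entry printCoverage() emits for each report
+// (file keys and line numbers below are hypothetical):
+//
+//   <key>ExecutedLines</key>
+//   <dict>
+//     <key>0</key>
+//     <array>
+//       <integer>4</integer>
+//       <integer>5</integer>
+//     </array>
+//   </dict>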
+
+static ExpansionInfo getExpandedMacro(SourceLocation MacroLoc,
+ const Preprocessor &PP);
+
+//===----------------------------------------------------------------------===//
+// Methods of PlistPrinter.
+//===----------------------------------------------------------------------===//
+
+void PlistPrinter::EmitRanges(raw_ostream &o,
+ const ArrayRef<SourceRange> Ranges,
+ unsigned indent) {
if (Ranges.empty())
return;
@@ -97,6 +183,10 @@ static void EmitRanges(raw_ostream &o,
Indent(o, indent) << "<key>ranges</key>\n";
Indent(o, indent) << "<array>\n";
++indent;
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
for (auto &R : Ranges)
EmitRange(o, SM,
Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts),
@@ -105,7 +195,8 @@ static void EmitRanges(raw_ostream &o,
Indent(o, indent) << "</array>\n";
}
-static void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent) {
+void PlistPrinter::EmitMessage(raw_ostream &o, StringRef Message,
+ unsigned indent) {
// Output the text.
assert(!Message.empty());
Indent(o, indent) << "<key>extended_message</key>\n";
@@ -119,12 +210,12 @@ static void EmitMessage(raw_ostream &o, StringRef Message, unsigned indent) {
EmitString(o, Message) << '\n';
}
-static void ReportControlFlow(raw_ostream &o,
- const PathDiagnosticControlFlowPiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent) {
+void PlistPrinter::ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ unsigned indent) {
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -173,13 +264,11 @@ static void ReportControlFlow(raw_ostream &o,
Indent(o, indent) << "</dict>\n";
}
-static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool isKeyEvent = false) {
+void PlistPrinter::ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
+ unsigned indent, unsigned depth,
+ bool isKeyEvent) {
+
+ const SourceManager &SM = PP.getSourceManager();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -198,7 +287,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
// Output the ranges (if any).
ArrayRef<SourceRange> Ranges = P.getRanges();
- EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
+ EmitRanges(o, Ranges, indent);
// Output the call depth.
Indent(o, indent) << "<key>depth</key>";
@@ -212,61 +301,80 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticEventPiece& P,
Indent(o, indent); o << "</dict>\n";
}
-static void ReportPiece(raw_ostream &o,
- const PathDiagnosticPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool includeControlFlow,
- bool isKeyEvent = false);
-
-static void ReportCall(raw_ostream &o,
- const PathDiagnosticCallPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportCall(raw_ostream &o, const PathDiagnosticCallPiece &P,
+ unsigned indent,
+ unsigned depth) {
if (auto callEnter = P.getCallEnterEvent())
- ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true,
+ ReportPiece(o, *callEnter, indent, depth, /*includeControlFlow*/ true,
P.isLastInMainSourceFile());
++depth;
if (auto callEnterWithinCaller = P.getCallEnterWithinCallerEvent())
- ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
- indent, depth, true);
+ ReportPiece(o, *callEnterWithinCaller, indent, depth,
+ /*includeControlFlow*/ true);
for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
- ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+ ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ true);
--depth;
if (auto callExit = P.getCallExitEvent())
- ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+ ReportPiece(o, *callExit, indent, depth, /*includeControlFlow*/ true);
}
-static void ReportMacro(raw_ostream &o,
- const PathDiagnosticMacroPiece& P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportMacroSubPieces(raw_ostream &o,
+ const PathDiagnosticMacroPiece& P,
+ unsigned indent, unsigned depth) {
+ MacroPieces.push_back(&P);
- for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
- I!=E; ++I) {
- ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
+ for (PathPieces::const_iterator I = P.subPieces.begin(),
+ E = P.subPieces.end();
+ I != E; ++I) {
+ ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ false);
+ }
+}
+
+void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
+
+ for (const PathDiagnosticMacroPiece *P : MacroPieces) {
+ const SourceManager &SM = PP.getSourceManager();
+ ExpansionInfo EI = getExpandedMacro(P->getLocation().asLocation(), PP);
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ // Output the location.
+ FullSourceLoc L = P->getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, L, FM, indent);
+
+ // Output the ranges (if any).
+ ArrayRef<SourceRange> Ranges = P->getRanges();
+ EmitRanges(o, Ranges, indent);
+
+ // Output the macro name.
+ Indent(o, indent) << "<key>name</key>";
+ EmitString(o, EI.MacroName) << '\n';
+
+ // Output what it expands into.
+ Indent(o, indent) << "<key>expansion</key>";
+ EmitString(o, EI.Expansion) << '\n';
+
+ // Finish up.
+ --indent;
+ Indent(o, indent);
+ o << "</dict>\n";
}
}
-static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
- const FIDMap& FM,
- const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth) {
+void PlistPrinter::ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
+ unsigned indent) {
+
+ const SourceManager &SM = PP.getSourceManager();
Indent(o, indent) << "<dict>\n";
++indent;
@@ -279,7 +387,7 @@ static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
// Output the ranges (if any).
ArrayRef<SourceRange> Ranges = P.getRanges();
- EmitRanges(o, Ranges, FM, SM, LangOpts, indent);
+ EmitRanges(o, Ranges, indent);
// Output the text.
EmitMessage(o, P.getString(), indent);
@@ -289,45 +397,115 @@ static void ReportNote(raw_ostream &o, const PathDiagnosticNotePiece& P,
Indent(o, indent); o << "</dict>\n";
}
-static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts) {
- ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
+//===----------------------------------------------------------------------===//
+// Static function definitions.
+//===----------------------------------------------------------------------===//
+
+/// Print coverage information to output stream {@code o}.
+/// May modify the used list of files {@code Fids} by inserting new ones.
+static void printCoverage(const PathDiagnostic *D,
+ unsigned InputIndentLevel,
+ SmallVectorImpl<FileID> &Fids,
+ FIDMap &FM,
+ llvm::raw_fd_ostream &o) {
+ unsigned IndentLevel = InputIndentLevel;
+
+ Indent(o, IndentLevel) << "<key>ExecutedLines</key>\n";
+ Indent(o, IndentLevel) << "<dict>\n";
+ IndentLevel++;
+
+ // Mapping from file IDs to executed lines.
+ const FilesToLineNumsMap &ExecutedLines = D->getExecutedLines();
+ for (auto I = ExecutedLines.begin(), E = ExecutedLines.end(); I != E; ++I) {
+ unsigned FileKey = AddFID(FM, Fids, I->first);
+ Indent(o, IndentLevel) << "<key>" << FileKey << "</key>\n";
+ Indent(o, IndentLevel) << "<array>\n";
+ IndentLevel++;
+ for (unsigned LineNo : I->second) {
+ Indent(o, IndentLevel);
+ EmitInteger(o, LineNo) << "\n";
+ }
+ IndentLevel--;
+ Indent(o, IndentLevel) << "</array>\n";
+ }
+ IndentLevel--;
+ Indent(o, IndentLevel) << "</dict>\n";
+
+ assert(IndentLevel == InputIndentLevel);
}
-static void ReportPiece(raw_ostream &o,
- const PathDiagnosticPiece &P,
- const FIDMap& FM, const SourceManager &SM,
- const LangOptions &LangOpts,
- unsigned indent,
- unsigned depth,
- bool includeControlFlow,
- bool isKeyEvent) {
- switch (P.getKind()) {
- case PathDiagnosticPiece::ControlFlow:
- if (includeControlFlow)
- ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
- LangOpts, indent);
- break;
- case PathDiagnosticPiece::Call:
- ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
- case PathDiagnosticPiece::Event:
- ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
- indent, depth, isKeyEvent);
- break;
- case PathDiagnosticPiece::Macro:
- ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
- case PathDiagnosticPiece::Note:
- ReportNote(o, cast<PathDiagnosticNotePiece>(P), FM, SM, LangOpts,
- indent, depth);
- break;
+static void printBugPath(llvm::raw_ostream &o, const FIDMap& FM,
+ AnalyzerOptions &AnOpts,
+ const Preprocessor &PP,
+ const PathPieces &Path) {
+ PlistPrinter Printer(FM, AnOpts, PP);
+ assert(std::is_partitioned(
+ Path.begin(), Path.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; }) &&
+ "PathDiagnostic is not partitioned so that notes precede the rest");
+
+ PathPieces::const_iterator FirstNonNote = std::partition_point(
+ Path.begin(), Path.end(),
+ [](const std::shared_ptr<PathDiagnosticPiece> &E)
+ { return E->getKind() == PathDiagnosticPiece::Note; });
+
+ PathPieces::const_iterator I = Path.begin();
+
+ if (FirstNonNote != Path.begin()) {
+ o << " <key>notes</key>\n"
+ " <array>\n";
+
+ for (; I != FirstNonNote; ++I)
+ Printer.ReportDiag(o, **I);
+
+ o << " </array>\n";
}
+
+ o << " <key>path</key>\n";
+
+ o << " <array>\n";
+
+ for (PathPieces::const_iterator E = Path.end(); I != E; ++I)
+ Printer.ReportDiag(o, **I);
+
+ o << " </array>\n";
+
+ if (!AnOpts.ShouldDisplayMacroExpansions)
+ return;
+
+ o << " <key>macro_expansions</key>\n"
+ " <array>\n";
+ Printer.ReportMacroExpansions(o, /* indent */ 4);
+ o << " </array>\n";
}
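+
+// A sketch of the per-diagnostic layout printBugPath() produces (abridged):
+//
+//   <key>notes</key>            <array> ...note pieces... </array>
+//   <key>path</key>             <array> ...remaining pieces... </array>
+//   <key>macro_expansions</key> <array> ... </array>
+//
+// The notes array appears only if note pieces are present, and the
+// macro_expansions array only when ShouldDisplayMacroExpansions is set.
+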
+//===----------------------------------------------------------------------===//
+// Methods of PlistDiagnostics.
+//===----------------------------------------------------------------------===//
+
+PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+ const std::string& output,
+ const Preprocessor &PP,
+ bool supportsMultipleFiles)
+ : OutputFile(output), PP(PP), AnOpts(AnalyzerOpts),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+
+void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string& s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP,
+ /*supportsMultipleFiles*/ false));
+}
+
+void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &s,
+ const Preprocessor &PP) {
+ C.push_back(new PlistDiagnostics(AnalyzerOpts, s, PP,
+ /*supportsMultipleFiles*/ true));
+}
void PlistDiagnostics::FlushDiagnosticsImpl(
std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) {
@@ -335,17 +513,15 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// ranges of the diagnostics.
FIDMap FM;
SmallVector<FileID, 10> Fids;
- const SourceManager* SM = nullptr;
-
- if (!Diags.empty())
- SM = &Diags.front()->path.front()->getLocation().getManager();
+ const SourceManager& SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
- auto AddPieceFID = [&FM, &Fids, SM](const PathDiagnosticPiece &Piece) {
- AddFID(FM, Fids, *SM, Piece.getLocation().asLocation());
+ auto AddPieceFID = [&FM, &Fids, &SM](const PathDiagnosticPiece &Piece) {
+ AddFID(FM, Fids, SM, Piece.getLocation().asLocation());
ArrayRef<SourceRange> Ranges = Piece.getRanges();
for (const SourceRange &Range : Ranges) {
- AddFID(FM, Fids, *SM, Range.getBegin());
- AddFID(FM, Fids, *SM, Range.getEnd());
+ AddFID(FM, Fids, SM, Range.getBegin());
+ AddFID(FM, Fids, SM, Range.getEnd());
}
};
@@ -395,14 +571,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << "<dict>\n" <<
" <key>clang_version</key>\n";
EmitString(o, getClangFullVersion()) << '\n';
- o << " <key>files</key>\n"
- " <array>\n";
-
- for (FileID FID : Fids)
- EmitString(o << " ", SM->getFileEntryForID(FID)->getName()) << '\n';
-
- o << " </array>\n"
- " <key>diagnostics</key>\n"
+ o << " <key>diagnostics</key>\n"
" <array>\n";
for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
@@ -411,39 +580,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <dict>\n";
const PathDiagnostic *D = *DI;
- const PathPieces &PP = D->path;
-
- assert(std::is_partitioned(
- PP.begin(), PP.end(),
- [](const std::shared_ptr<PathDiagnosticPiece> &E)
- { return E->getKind() == PathDiagnosticPiece::Note; }) &&
- "PathDiagnostic is not partitioned so that notes precede the rest");
-
- PathPieces::const_iterator FirstNonNote = std::partition_point(
- PP.begin(), PP.end(),
- [](const std::shared_ptr<PathDiagnosticPiece> &E)
- { return E->getKind() == PathDiagnosticPiece::Note; });
-
- PathPieces::const_iterator I = PP.begin();
-
- if (FirstNonNote != PP.begin()) {
- o << " <key>notes</key>\n"
- " <array>\n";
-
- for (; I != FirstNonNote; ++I)
- ReportDiag(o, **I, FM, *SM, LangOpts);
-
- o << " </array>\n";
- }
-
- o << " <key>path</key>\n";
-
- o << " <array>\n";
-
- for (PathPieces::const_iterator E = PP.end(); I != E; ++I)
- ReportDiag(o, **I, FM, *SM, LangOpts);
-
- o << " </array>\n";
+ printBugPath(o, FM, AnOpts, PP, D->path);
// Output the bug type and bug category.
o << " <key>description</key>";
@@ -458,12 +595,12 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <!-- This hash is experimental and going to change! -->\n";
o << " <key>issue_hash_content_of_line_in_context</key>";
PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
- FullSourceLoc L(SM->getExpansionLoc(UPDLoc.isValid()
+ FullSourceLoc L(SM.getExpansionLoc(UPDLoc.isValid()
? UPDLoc.asLocation()
: D->getLocation().asLocation()),
- *SM);
+ SM);
const Decl *DeclWithIssue = D->getDeclWithIssue();
- EmitString(o, GetIssueHash(*SM, L, D->getCheckName(), D->getBugType(),
+ EmitString(o, GetIssueHash(SM, L, D->getCheckName(), D->getBugType(),
DeclWithIssue, LangOpts))
<< '\n';
@@ -507,15 +644,17 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// the leak location even after code is added between the allocation
// site and the end of scope (leak report location).
if (UPDLoc.isValid()) {
- FullSourceLoc UFunL(SM->getExpansionLoc(
- D->getUniqueingDecl()->getBody()->getLocStart()), *SM);
+ FullSourceLoc UFunL(
+ SM.getExpansionLoc(
+ D->getUniqueingDecl()->getBody()->getBeginLoc()),
+ SM);
o << " <key>issue_hash_function_offset</key><string>"
<< L.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
<< "</string>\n";
// Otherwise, use the location on which the bug is reported.
} else {
- FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
+ FullSourceLoc FunL(SM.getExpansionLoc(Body->getBeginLoc()), SM);
o << " <key>issue_hash_function_offset</key><string>"
<< L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
<< "</string>\n";
@@ -527,7 +666,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Output the location of the bug.
o << " <key>location</key>\n";
- EmitLocation(o, *SM, D->getLocation().asLocation(), FM, 2);
+ EmitLocation(o, SM, D->getLocation().asLocation(), FM, 2);
// Output the diagnostic to the sub-diagnostic client, if any.
if (!filesMade->empty()) {
@@ -551,13 +690,21 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
}
}
+ printCoverage(D, /*IndentLevel=*/2, Fids, FM, o);
+
// Close up the entry.
o << " </dict>\n";
}
o << " </array>\n";
- if (llvm::AreStatisticsEnabled() && SerializeStatistics) {
+ o << " <key>files</key>\n"
+ " <array>\n";
+ for (FileID FID : Fids)
+ EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
+ o << " </array>\n";
+
+ if (llvm::AreStatisticsEnabled() && AnOpts.ShouldSerializeStats) {
o << " <key>statistics</key>\n";
std::string stats;
llvm::raw_string_ostream os(stats);
@@ -569,3 +716,402 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Finish.
o << "</dict>\n</plist>";
}
+
+//===----------------------------------------------------------------------===//
+// Declarations of helper functions and data structures for expanding macros.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+using ExpArgTokens = llvm::SmallVector<Token, 2>;
+
+/// Maps unexpanded macro arguments to expanded arguments. A macro argument may
+/// need to be expanded further when it is nested inside another macro.
+class MacroArgMap : public std::map<const IdentifierInfo *, ExpArgTokens> {
+public:
+ void expandFromPrevMacro(const MacroArgMap &Super);
+};
+
+struct MacroNameAndArgs {
+ std::string Name;
+ const MacroInfo *MI = nullptr;
+ MacroArgMap Args;
+
+ MacroNameAndArgs(std::string N, const MacroInfo *MI, MacroArgMap M)
+ : Name(std::move(N)), MI(MI), Args(std::move(M)) {}
+};
+
+class TokenPrinter {
+ llvm::raw_ostream &OS;
+ const Preprocessor &PP;
+
+ Token PrevTok, PrevPrevTok;
+ TokenConcatenation ConcatInfo;
+
+public:
+ TokenPrinter(llvm::raw_ostream &OS, const Preprocessor &PP)
+ : OS(OS), PP(PP), ConcatInfo(PP) {
+ PrevTok.setKind(tok::unknown);
+ PrevPrevTok.setKind(tok::unknown);
+ }
+
+ void printToken(const Token &Tok);
+};
+
+} // end of anonymous namespace
+
+/// The implementation method of getExpandedMacro(): it prints the expansion of
+/// a macro to \p Printer, and returns the name of the macro.
+///
+/// Since macros can be nested in one another, this function may call itself
+/// recursively.
+///
+/// Unfortunately, macro arguments have to be expanded manually. To understand
+/// why, observe the following example:
+///
+/// #define PRINT(x) print(x)
+/// #define DO_SOMETHING(str) PRINT(str)
+///
+/// DO_SOMETHING("Cute panda cubs.");
+///
+/// As we expand the last line, we'll immediately replace PRINT(str) with
+/// print(x). The information that both 'str' and 'x' refer to the same string
+/// is information we have to forward, hence the argument \p PrevArgs.
+static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs);
+
+/// Retrieves the name of the macro and what its arguments expand into
+/// at \p ExpanLoc.
+///
+/// For example, for the following macro expansion:
+///
+/// #define SET_TO_NULL(x) x = 0
+/// #define NOT_SUSPICIOUS(a) \
+/// { \
+/// int b = 0; \
+/// } \
+/// SET_TO_NULL(a)
+///
+/// int *ptr = new int(4);
+/// NOT_SUSPICIOUS(&ptr);
+/// *ptr = 5;
+///
+/// When \p ExpanLoc references the last line, the macro name "NOT_SUSPICIOUS"
+/// and the MacroArgMap map { (a, &ptr) } will be returned.
+///
+/// When \p ExpanLoc references "SET_TO_NULL(a)" within the definition of
+/// "NOT_SUSPICOUS", the macro name "SET_TO_NULL" and the MacroArgMap map
+/// { (x, a) } will be returned.
+static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
+ const Preprocessor &PP);
+
+/// Retrieves the ')' token that matches '(' \p It points to.
+static MacroInfo::tokens_iterator getMatchingRParen(
+ MacroInfo::tokens_iterator It,
+ MacroInfo::tokens_iterator End);
+
+/// Retrieves the MacroInfo that \p II refers to at \p Loc. This is important
+/// because macros can be redefined or undefined.
+static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
+ const SourceManager &SM,
+ const IdentifierInfo *II,
+ SourceLocation Loc);
+
+//===----------------------------------------------------------------------===//
+// Definitions of helper functions and methods for expanding macros.
+//===----------------------------------------------------------------------===//
+
+static ExpansionInfo getExpandedMacro(SourceLocation MacroLoc,
+ const Preprocessor &PP) {
+
+ llvm::SmallString<200> ExpansionBuf;
+ llvm::raw_svector_ostream OS(ExpansionBuf);
+ TokenPrinter Printer(OS, PP);
+ std::string MacroName =
+ getMacroNameAndPrintExpansion(Printer, MacroLoc, PP, MacroArgMap{});
+ return { MacroName, OS.str() };
+}
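+
+// For the PRINT/DO_SOMETHING example documented below, calling
+// getExpandedMacro() on the expansion location of
+// DO_SOMETHING("Cute panda cubs.") would return roughly
+// { "DO_SOMETHING", "print(\"Cute panda cubs.\")" } -- a sketch; the exact
+// whitespace depends on TokenPrinter.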
+
+static std::string getMacroNameAndPrintExpansion(TokenPrinter &Printer,
+ SourceLocation MacroLoc,
+ const Preprocessor &PP,
+ const MacroArgMap &PrevArgs) {
+
+ const SourceManager &SM = PP.getSourceManager();
+
+ MacroNameAndArgs Info = getMacroNameAndArgs(SM.getExpansionLoc(MacroLoc), PP);
+
+ // Manually expand its arguments from the previous macro.
+ Info.Args.expandFromPrevMacro(PrevArgs);
+
+ // Iterate over the macro's tokens and stringify them.
+ for (auto It = Info.MI->tokens_begin(), E = Info.MI->tokens_end(); It != E;
+ ++It) {
+ Token T = *It;
+
+ // If this token is not an identifier, we only need to print it.
+ if (T.isNot(tok::identifier)) {
+ Printer.printToken(T);
+ continue;
+ }
+
+ const auto *II = T.getIdentifierInfo();
+ assert(II &&
+ "This token is an identifier but has no IdentifierInfo!");
+
+ // If this token is a macro that should be expanded inside the current
+ // macro.
+ if (const MacroInfo *MI =
+ getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
+ getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP, Info.Args);
+
+ // If this is a function-like macro, skip its arguments, as
+ // getExpandedMacro() already printed them. If this is the case, let's
+ // first jump to the '(' token.
+ if (MI->getNumParams() != 0)
+ It = getMatchingRParen(++It, E);
+ continue;
+ }
+
+ // If this token is the current macro's argument, we should expand it.
+ auto ArgMapIt = Info.Args.find(II);
+ if (ArgMapIt != Info.Args.end()) {
+ for (MacroInfo::tokens_iterator ArgIt = ArgMapIt->second.begin(),
+ ArgEnd = ArgMapIt->second.end();
+ ArgIt != ArgEnd; ++ArgIt) {
+
+        // These tokens may still be macros; if that is the case, handle them
+        // the same way we did above.
+ const auto *ArgII = ArgIt->getIdentifierInfo();
+ if (!ArgII) {
+ Printer.printToken(*ArgIt);
+ continue;
+ }
+
+ const auto *MI = PP.getMacroInfo(ArgII);
+ if (!MI) {
+ Printer.printToken(*ArgIt);
+ continue;
+ }
+
+ getMacroNameAndPrintExpansion(Printer, ArgIt->getLocation(), PP,
+ Info.Args);
+ if (MI->getNumParams() != 0)
+ ArgIt = getMatchingRParen(++ArgIt, ArgEnd);
+ }
+ continue;
+ }
+
+    // If control reached here, then this token isn't a macro identifier, nor
+    // an unexpanded macro argument that we need to handle, so just print it.
+ Printer.printToken(T);
+ }
+
+ return Info.Name;
+}
+
+static MacroNameAndArgs getMacroNameAndArgs(SourceLocation ExpanLoc,
+ const Preprocessor &PP) {
+
+ const SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ // First, we create a Lexer to lex *at the expansion location* the tokens
+ // referring to the macro's name and its arguments.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(ExpanLoc);
+ const llvm::MemoryBuffer *MB = SM.getBuffer(LocInfo.first);
+ const char *MacroNameTokenPos = MB->getBufferStart() + LocInfo.second;
+
+ Lexer RawLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
+ MB->getBufferStart(), MacroNameTokenPos, MB->getBufferEnd());
+
+ // Acquire the macro's name.
+ Token TheTok;
+ RawLexer.LexFromRawLexer(TheTok);
+
+ std::string MacroName = PP.getSpelling(TheTok);
+
+ const auto *II = PP.getIdentifierInfo(MacroName);
+ assert(II && "Failed to acquire the IndetifierInfo for the macro!");
+
+ const MacroInfo *MI = getMacroInfoForLocation(PP, SM, II, ExpanLoc);
+ assert(MI && "The macro must've been defined at it's expansion location!");
+
+ // Acquire the macro's arguments.
+ //
+  // The rough idea here is to lex from the first left parenthesis to the last
+  // right parenthesis, and map the macro's unexpanded arguments to what they
+ // will be expanded to. An expanded macro argument may contain several tokens
+ // (like '3 + 4'), so we'll lex until we find a tok::comma or tok::r_paren, at
+ // which point we start lexing the next argument or finish.
+ ArrayRef<const IdentifierInfo *> MacroArgs = MI->params();
+ if (MacroArgs.empty())
+ return { MacroName, MI, {} };
+
+ RawLexer.LexFromRawLexer(TheTok);
+ assert(TheTok.is(tok::l_paren) &&
+ "The token after the macro's identifier token should be '('!");
+
+ MacroArgMap Args;
+
+ // When the macro's argument is a function call, like
+ // CALL_FN(someFunctionName(param1, param2))
+ // we will find tok::l_paren, tok::r_paren, and tok::comma that do not divide
+ // actual macro arguments, or do not represent the macro argument's closing
+ // parentheses, so we'll count how many parentheses aren't closed yet.
+  // If ParenthesesDepth
+ // * = 0, then there are no more arguments to lex.
+ // * = 1, then if we find a tok::comma, we can start lexing the next arg.
+ // * > 1, then tok::comma is a part of the current arg.
+ int ParenthesesDepth = 1;
+
+ // If we encounter __VA_ARGS__, we will lex until the closing tok::r_paren,
+  // even if we lex a tok::comma and ParenthesesDepth == 1.
+ const IdentifierInfo *__VA_ARGS__II = PP.getIdentifierInfo("__VA_ARGS__");
+
+ for (const IdentifierInfo *UnexpArgII : MacroArgs) {
+ MacroArgMap::mapped_type ExpandedArgTokens;
+
+ // One could also simply not supply a single argument to __VA_ARGS__ -- this
+ // results in a preprocessor warning, but is not an error:
+ // #define VARIADIC(ptr, ...) \
+ // someVariadicTemplateFunction(__VA_ARGS__)
+ //
+ // int *ptr;
+ // VARIADIC(ptr); // Note that there are no commas, this isn't just an
+ // // empty parameter -- there are no parameters for '...'.
+ // In any other case, ParenthesesDepth mustn't be 0 here.
+ if (ParenthesesDepth != 0) {
+
+ // Lex the first token of the next macro parameter.
+ RawLexer.LexFromRawLexer(TheTok);
+
+ while (!(ParenthesesDepth == 1 &&
+ (UnexpArgII == __VA_ARGS__II ? false : TheTok.is(tok::comma)))) {
+ assert(TheTok.isNot(tok::eof) &&
+ "EOF encountered while looking for expanded macro args!");
+
+ if (TheTok.is(tok::l_paren))
+ ++ParenthesesDepth;
+
+ if (TheTok.is(tok::r_paren))
+ --ParenthesesDepth;
+
+ if (ParenthesesDepth == 0)
+ break;
+
+ if (TheTok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(TheTok);
+
+ ExpandedArgTokens.push_back(TheTok);
+ RawLexer.LexFromRawLexer(TheTok);
+ }
+ } else {
+ assert(UnexpArgII == __VA_ARGS__II);
+ }
+
+ Args.emplace(UnexpArgII, std::move(ExpandedArgTokens));
+ }
+
+ assert(TheTok.is(tok::r_paren) &&
+ "Expanded macro argument acquisition failed! After the end of the loop"
+ " this token should be ')'!");
+
+ return { MacroName, MI, Args };
+}
+
+static MacroInfo::tokens_iterator getMatchingRParen(
+ MacroInfo::tokens_iterator It,
+ MacroInfo::tokens_iterator End) {
+
+ assert(It->is(tok::l_paren) && "This token should be '('!");
+
+ // Skip until we find the closing ')'.
+ int ParenthesesDepth = 1;
+ while (ParenthesesDepth != 0) {
+ ++It;
+
+ assert(It->isNot(tok::eof) &&
+ "Encountered EOF while attempting to skip macro arguments!");
+ assert(It != End &&
+ "End of the macro definition reached before finding ')'!");
+
+ if (It->is(tok::l_paren))
+ ++ParenthesesDepth;
+
+ if (It->is(tok::r_paren))
+ --ParenthesesDepth;
+ }
+ return It;
+}
+
+static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
+ const SourceManager &SM,
+ const IdentifierInfo *II,
+ SourceLocation Loc) {
+
+ const MacroDirective *MD = PP.getLocalMacroDirectiveHistory(II);
+ if (!MD)
+ return nullptr;
+
+ return MD->findDirectiveAtLoc(Loc, SM).getMacroInfo();
+}
+
+void MacroArgMap::expandFromPrevMacro(const MacroArgMap &Super) {
+
+ for (value_type &Pair : *this) {
+ ExpArgTokens &CurrExpArgTokens = Pair.second;
+
+ // For each token in the expanded macro argument.
+ auto It = CurrExpArgTokens.begin();
+ while (It != CurrExpArgTokens.end()) {
+ if (It->isNot(tok::identifier)) {
+ ++It;
+ continue;
+ }
+
+ const auto *II = It->getIdentifierInfo();
+ assert(II);
+
+ // Is this an argument that "Super" expands further?
+ if (!Super.count(II)) {
+ ++It;
+ continue;
+ }
+
+ const ExpArgTokens &SuperExpArgTokens = Super.at(II);
+
+ It = CurrExpArgTokens.insert(
+ It, SuperExpArgTokens.begin(), SuperExpArgTokens.end());
+ std::advance(It, SuperExpArgTokens.size());
+ It = CurrExpArgTokens.erase(It);
+ }
+ }
+}
+
+void TokenPrinter::printToken(const Token &Tok) {
+ // If this is the first token to be printed, don't print space.
+ if (PrevTok.isNot(tok::unknown)) {
+ // If the tokens were already space separated, or if they must be to avoid
+ // them being implicitly pasted, add a space between them.
+ if(Tok.hasLeadingSpace() || ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok,
+ Tok)) {
+ // AvoidConcat doesn't check for ##, don't print a space around it.
+ if (PrevTok.isNot(tok::hashhash) && Tok.isNot(tok::hashhash)) {
+ OS << ' ';
+ }
+ }
+ }
+
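+  // The '#' and '##' operator tokens themselves are never printed below; a
+  // token that immediately follows a '#' is printed back in quotes, so a
+  // token sequence like '# foo' comes out as '"foo"' ('foo' is only an
+  // illustration).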
+ if (!Tok.isOneOf(tok::hash, tok::hashhash)) {
+ if (PrevTok.is(tok::hash))
+ OS << '\"' << PP.getSpelling(Tok) << '\"';
+ else
+ OS << PP.getSpelling(Tok);
+ }
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+}
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index 94e2e00d8bbc..2e2e2ec94f39 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -69,6 +69,10 @@ ProgramState::~ProgramState() {
stateMgr->getStoreManager().decrementReferenceCount(store);
}
+int64_t ProgramState::getID() const {
+ return getStateManager().Alloc.identifyKnownAlignedObject<ProgramState>(this);
+}
+
ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
StoreManagerCreator CreateSMgr,
ConstraintManagerCreator CreateCMgr,
@@ -121,8 +125,8 @@ ProgramStateRef ProgramState::bindLoc(Loc LV,
ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
LV, V));
const MemRegion *MR = LV.getAsRegion();
- if (MR && Mgr.getOwningEngine() && notifyChanges)
- return Mgr.getOwningEngine()->processRegionChange(newState, MR, LCtx);
+ if (MR && notifyChanges)
+ return Mgr.getOwningEngine().processRegionChange(newState, MR, LCtx);
return newState;
}
@@ -134,9 +138,7 @@ ProgramState::bindDefaultInitial(SVal loc, SVal V,
const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
const StoreRef &newStore = Mgr.StoreMgr->BindDefaultInitial(getStore(), R, V);
ProgramStateRef new_state = makeWithStore(newStore);
- return Mgr.getOwningEngine()
- ? Mgr.getOwningEngine()->processRegionChange(new_state, R, LCtx)
- : new_state;
+ return Mgr.getOwningEngine().processRegionChange(new_state, R, LCtx);
}
ProgramStateRef
@@ -145,9 +147,7 @@ ProgramState::bindDefaultZero(SVal loc, const LocationContext *LCtx) const {
const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
const StoreRef &newStore = Mgr.StoreMgr->BindDefaultZero(getStore(), R);
ProgramStateRef new_state = makeWithStore(newStore);
- return Mgr.getOwningEngine()
- ? Mgr.getOwningEngine()->processRegionChange(new_state, R, LCtx)
- : new_state;
+ return Mgr.getOwningEngine().processRegionChange(new_state, R, LCtx);
}
typedef ArrayRef<const MemRegion *> RegionList;
@@ -192,41 +192,34 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
RegionAndSymbolInvalidationTraits *ITraits,
const CallEvent *Call) const {
ProgramStateManager &Mgr = getStateManager();
- SubEngine* Eng = Mgr.getOwningEngine();
+ SubEngine &Eng = Mgr.getOwningEngine();
- InvalidatedSymbols Invalidated;
+ InvalidatedSymbols InvalidatedSyms;
if (!IS)
- IS = &Invalidated;
+ IS = &InvalidatedSyms;
RegionAndSymbolInvalidationTraits ITraitsLocal;
if (!ITraits)
ITraits = &ITraitsLocal;
- if (Eng) {
- StoreManager::InvalidatedRegions TopLevelInvalidated;
- StoreManager::InvalidatedRegions Invalidated;
- const StoreRef &newStore
- = Mgr.StoreMgr->invalidateRegions(getStore(), Values, E, Count, LCtx, Call,
- *IS, *ITraits, &TopLevelInvalidated,
- &Invalidated);
-
- ProgramStateRef newState = makeWithStore(newStore);
-
- if (CausedByPointerEscape) {
- newState = Eng->notifyCheckersOfPointerEscape(newState, IS,
- TopLevelInvalidated,
- Invalidated, Call,
- *ITraits);
- }
+ StoreManager::InvalidatedRegions TopLevelInvalidated;
+ StoreManager::InvalidatedRegions Invalidated;
+ const StoreRef &newStore
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Values, E, Count, LCtx, Call,
+ *IS, *ITraits, &TopLevelInvalidated,
+ &Invalidated);
+
+ ProgramStateRef newState = makeWithStore(newStore);
- return Eng->processRegionChanges(newState, IS, TopLevelInvalidated,
- Invalidated, LCtx, Call);
+ if (CausedByPointerEscape) {
+ newState = Eng.notifyCheckersOfPointerEscape(newState, IS,
+ TopLevelInvalidated,
+ Call,
+ *ITraits);
}
- const StoreRef &newStore =
- Mgr.StoreMgr->invalidateRegions(getStore(), Values, E, Count, LCtx, Call,
- *IS, *ITraits, nullptr, nullptr);
- return makeWithStore(newStore);
+ return Eng.processRegionChanges(newState, IS, TopLevelInvalidated,
+ Invalidated, LCtx, Call);
}
ProgramStateRef ProgramState::killBinding(Loc LV) const {
@@ -449,14 +442,16 @@ void ProgramState::setStore(const StoreRef &newStore) {
// State pretty-printing.
//===----------------------------------------------------------------------===//
-void ProgramState::print(raw_ostream &Out, const char *NL, const char *Sep,
+void ProgramState::print(raw_ostream &Out,
+ const char *NL, const char *Sep,
const LocationContext *LC) const {
// Print the store.
ProgramStateManager &Mgr = getStateManager();
- Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
+ const ASTContext &Context = getStateManager().getContext();
+ Mgr.getStoreManager().print(getStore(), Out, NL);
// Print out the environment.
- Env.print(Out, NL, Sep, LC);
+ Env.print(Out, NL, Sep, Context, LC);
// Print out the constraints.
Mgr.getConstraintManager().print(this, Out, NL, Sep);
@@ -465,13 +460,14 @@ void ProgramState::print(raw_ostream &Out, const char *NL, const char *Sep,
printDynamicTypeInfo(this, Out, NL, Sep);
// Print out tainted symbols.
- printTaint(Out, NL, Sep);
+ printTaint(Out, NL);
// Print checker-specific data.
- Mgr.getOwningEngine()->printState(Out, this, NL, Sep, LC);
+ Mgr.getOwningEngine().printState(Out, this, NL, Sep, LC);
}
-void ProgramState::printDOT(raw_ostream &Out, const LocationContext *LC) const {
+void ProgramState::printDOT(raw_ostream &Out,
+ const LocationContext *LC) const {
print(Out, "\\l", "\\|", LC);
}
@@ -480,7 +476,7 @@ LLVM_DUMP_METHOD void ProgramState::dump() const {
}
void ProgramState::printTaint(raw_ostream &Out,
- const char *NL, const char *Sep) const {
+ const char *NL) const {
TaintMapImpl TM = get<TaintMap>();
if (!TM.isEmpty())
@@ -496,7 +492,7 @@ void ProgramState::dumpTaint() const {
}
AnalysisManager& ProgramState::getAnalysisManager() const {
- return stateMgr->getOwningEngine()->getAnalysisManager();
+ return stateMgr->getOwningEngine().getAnalysisManager();
}
//===----------------------------------------------------------------------===//
@@ -652,22 +648,12 @@ bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const
return S.scan(val);
}
-bool ProgramState::scanReachableSymbols(const SVal *I, const SVal *E,
- SymbolVisitor &visitor) const {
+bool ProgramState::scanReachableSymbols(
+ llvm::iterator_range<region_iterator> Reachable,
+ SymbolVisitor &visitor) const {
ScanReachableSymbols S(this, visitor);
- for ( ; I != E; ++I) {
- if (!S.scan(*I))
- return false;
- }
- return true;
-}
-
-bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
- const MemRegion * const *E,
- SymbolVisitor &visitor) const {
- ScanReachableSymbols S(this, visitor);
- for ( ; I != E; ++I) {
- if (!S.scan(*I))
+ for (const MemRegion *R : Reachable) {
+ if (!S.scan(R))
return false;
}
return true;
@@ -835,4 +821,3 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
return false;
}
-
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index e8c7bdbde385..d9b58d0f5185 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -399,7 +399,7 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
SymbolRef Sym = I.getKey();
- if (SymReaper.maybeDead(Sym)) {
+ if (SymReaper.isDead(Sym)) {
Changed = true;
CR = CRFactory.remove(CR, Sym);
}
diff --git a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index f99853f07073..146dc20ad021 100644
--- a/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -200,6 +200,11 @@ void RangedConstraintManager::computeAdjustment(SymbolRef &Sym,
}
}
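+// The address of the function-local static below is used as the unique key
+// under which this trait's data lives in the state's generic data map (GDM).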
+void *ProgramStateTrait<ConstraintRange>::GDMIndex() {
+ static int Index;
+ return &Index;
+}
+
} // end of namespace ento
} // end of namespace clang
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index db6449e6d5f3..b2339be4f263 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Basic/TargetInfo.h"
@@ -61,7 +62,9 @@ private:
: P(r, k), Data(offset) {
assert(r && "Must have known regions.");
assert(getOffset() == offset && "Failed to store offset");
- assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r)) && "Not a base");
+ assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r) ||
+            isa<CXXDerivedObjectRegion>(r)) &&
+ "Not a base");
}
public:
@@ -308,7 +311,7 @@ public:
//===----------------------------------------------------------------------===//
namespace {
-class invalidateRegionsWorker;
+class InvalidateRegionsWorker;
class RegionStoreManager : public StoreManager {
public:
@@ -335,7 +338,7 @@ private:
/// A helper used to populate the work list with the given set of
/// regions.
- void populateWorkList(invalidateRegionsWorker &W,
+ void populateWorkList(InvalidateRegionsWorker &W,
ArrayRef<SVal> Values,
InvalidatedRegions *TopLevelRegions);
@@ -344,11 +347,9 @@ public:
: StoreManager(mgr), Features(f),
RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
SmallStructLimit(0) {
- if (SubEngine *Eng = StateMgr.getOwningEngine()) {
- AnalyzerOptions &Options = Eng->getAnalysisManager().options;
- SmallStructLimit =
- Options.getOptionAsInteger("region-store-small-struct-limit", 2);
- }
+ SubEngine &Eng = StateMgr.getOwningEngine();
+ AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+ SmallStructLimit = Options.RegionStoreSmallStructLimit;
}
@@ -598,8 +599,7 @@ public: // Part of public interface to class.
RBFactory.getTreeFactory());
}
- void print(Store store, raw_ostream &Out, const char* nl,
- const char *sep) override;
+ void print(Store store, raw_ostream &Out, const char* nl) override;
void iterBindings(Store store, BindingsHandler& f) override {
RegionBindingsRef B = getRegionBindings(store);
@@ -945,7 +945,7 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
}
namespace {
-class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
+class InvalidateRegionsWorker : public ClusterAnalysis<InvalidateRegionsWorker>
{
const Expr *Ex;
unsigned Count;
@@ -955,7 +955,7 @@ class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
StoreManager::InvalidatedRegions *Regions;
GlobalsFilterKind GlobalsFilter;
public:
- invalidateRegionsWorker(RegionStoreManager &rm,
+ InvalidateRegionsWorker(RegionStoreManager &rm,
ProgramStateManager &stateMgr,
RegionBindingsRef b,
const Expr *ex, unsigned count,
@@ -964,7 +964,7 @@ public:
RegionAndSymbolInvalidationTraits &ITraitsIn,
StoreManager::InvalidatedRegions *r,
GlobalsFilterKind GFK)
- : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b),
+ : ClusterAnalysis<InvalidateRegionsWorker>(rm, stateMgr, b),
Ex(ex), Count(count), LCtx(lctx), IS(is), ITraits(ITraitsIn), Regions(r),
GlobalsFilter(GFK) {}
@@ -985,14 +985,14 @@ public:
};
}
-bool invalidateRegionsWorker::AddToWorkList(const MemRegion *R) {
+bool InvalidateRegionsWorker::AddToWorkList(const MemRegion *R) {
bool doNotInvalidateSuperRegion = ITraits.hasTrait(
R, RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
const MemRegion *BaseR = doNotInvalidateSuperRegion ? R : R->getBaseRegion();
return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
}
-void invalidateRegionsWorker::VisitBinding(SVal V) {
+void InvalidateRegionsWorker::VisitBinding(SVal V) {
// A symbol? Mark it touched by the invalidation.
if (SymbolRef Sym = V.getAsSymbol())
IS.insert(Sym);
@@ -1017,7 +1017,7 @@ void invalidateRegionsWorker::VisitBinding(SVal V) {
}
}
-void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
const ClusterBindings *C) {
bool PreserveRegionsContents =
@@ -1033,6 +1033,32 @@ void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
B = B.remove(baseR);
}
+ if (const auto *TO = dyn_cast<TypedValueRegion>(baseR)) {
+ if (const auto *RD = TO->getValueType()->getAsCXXRecordDecl()) {
+
+ // Lambdas can affect all static local variables without explicitly
+      // capturing them.
+ // We invalidate all static locals referenced inside the lambda body.
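+      // For instance (an illustrative snippet):
+      //   static int Counter = 0;
+      //   auto L = [] { ++Counter; };  // modifies 'Counter' without capture
+      //   L();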
+ if (RD->isLambda() && RD->getLambdaCallOperator()->getBody()) {
+ using namespace ast_matchers;
+
+ const char *DeclBind = "DeclBind";
+ StatementMatcher RefToStatic = stmt(hasDescendant(declRefExpr(
+ to(varDecl(hasStaticStorageDuration()).bind(DeclBind)))));
+ auto Matches =
+ match(RefToStatic, *RD->getLambdaCallOperator()->getBody(),
+ RD->getASTContext());
+
+ for (BoundNodes &Match : Matches) {
+ auto *VD = Match.getNodeAs<VarDecl>(DeclBind);
+ const VarRegion *ToInvalidate =
+ RM.getRegionManager().getVarRegion(VD, LCtx);
+ AddToWorkList(ToInvalidate);
+ }
+ }
+ }
+ }
+
// BlockDataRegion? If so, invalidate captured variables that are passed
// by reference.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
@@ -1181,7 +1207,7 @@ void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
B = B.addBinding(baseR, BindingKey::Direct, V);
}
-bool invalidateRegionsWorker::isInitiallyIncludedGlobalRegion(
+bool InvalidateRegionsWorker::isInitiallyIncludedGlobalRegion(
const MemRegion *R) {
switch (GlobalsFilter) {
case GFK_None:
@@ -1195,7 +1221,7 @@ bool invalidateRegionsWorker::isInitiallyIncludedGlobalRegion(
llvm_unreachable("unknown globals filter");
}
-bool invalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
+bool InvalidateRegionsWorker::includeEntireMemorySpace(const MemRegion *Base) {
if (isInitiallyIncludedGlobalRegion(Base))
return true;
@@ -1229,7 +1255,7 @@ RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
return B;
}
-void RegionStoreManager::populateWorkList(invalidateRegionsWorker &W,
+void RegionStoreManager::populateWorkList(InvalidateRegionsWorker &W,
ArrayRef<SVal> Values,
InvalidatedRegions *TopLevelRegions) {
for (ArrayRef<SVal>::iterator I = Values.begin(),
@@ -1280,7 +1306,7 @@ RegionStoreManager::invalidateRegions(Store store,
}
RegionBindingsRef B = getRegionBindings(store);
- invalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ITraits,
+ InvalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ITraits,
Invalidated, GlobalsFilter);
// Scan the bindings and generate the clusters.
@@ -1302,11 +1328,11 @@ RegionStoreManager::invalidateRegions(Store store,
case GFK_All:
B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case GFK_SystemOnly:
B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case GFK_None:
break;
}
@@ -2363,40 +2389,45 @@ RegionStoreManager::bindAggregate(RegionBindingsConstRef B,
//===----------------------------------------------------------------------===//
namespace {
-class removeDeadBindingsWorker :
- public ClusterAnalysis<removeDeadBindingsWorker> {
- SmallVector<const SymbolicRegion*, 12> Postponed;
+class RemoveDeadBindingsWorker
+ : public ClusterAnalysis<RemoveDeadBindingsWorker> {
+ using ChildrenListTy = SmallVector<const SymbolDerived *, 4>;
+ using MapParentsToDerivedTy = llvm::DenseMap<SymbolRef, ChildrenListTy>;
+
+ MapParentsToDerivedTy ParentsToDerived;
SymbolReaper &SymReaper;
const StackFrameContext *CurrentLCtx;
public:
- removeDeadBindingsWorker(RegionStoreManager &rm,
+ RemoveDeadBindingsWorker(RegionStoreManager &rm,
ProgramStateManager &stateMgr,
RegionBindingsRef b, SymbolReaper &symReaper,
const StackFrameContext *LCtx)
- : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b),
+ : ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b),
SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
- using ClusterAnalysis<removeDeadBindingsWorker>::VisitCluster;
+ using ClusterAnalysis<RemoveDeadBindingsWorker>::VisitCluster;
using ClusterAnalysis::AddToWorkList;
bool AddToWorkList(const MemRegion *R);
- bool UpdatePostponed();
void VisitBinding(SVal V);
+
+private:
+ void populateWorklistFromSymbol(SymbolRef s);
};
}
-bool removeDeadBindingsWorker::AddToWorkList(const MemRegion *R) {
+bool RemoveDeadBindingsWorker::AddToWorkList(const MemRegion *R) {
const MemRegion *BaseR = R->getBaseRegion();
return AddToWorkList(WorkListElement(BaseR), getCluster(BaseR));
}
-void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
const ClusterBindings &C) {
if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
@@ -2407,10 +2438,11 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
- if (SymReaper.isLive(SR->getSymbol()))
+ if (SymReaper.isLive(SR->getSymbol())) {
AddToWorkList(SR, &C);
- else
- Postponed.push_back(SR);
+ } else if (const auto *SD = dyn_cast<SymbolDerived>(SR->getSymbol())) {
+ ParentsToDerived[SD->getParentSymbol()].push_back(SD);
+ }
return;
}
@@ -2422,7 +2454,7 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
// CXXThisRegion in the current or parent location context is live.
if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
- const StackArgumentsSpaceRegion *StackReg =
+ const auto *StackReg =
cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
if (CurrentLCtx &&
@@ -2431,7 +2463,7 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
}
}
-void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
const ClusterBindings *C) {
if (!C)
return;
@@ -2449,7 +2481,7 @@ void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
}
}
-void removeDeadBindingsWorker::VisitBinding(SVal V) {
+void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
// Is it a LazyCompoundVal? All referenced regions are live as well.
if (Optional<nonloc::LazyCompoundVal> LCS =
V.getAs<nonloc::LazyCompoundVal>()) {
@@ -2467,6 +2499,15 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
// If V is a region, then add it to the worklist.
if (const MemRegion *R = V.getAsRegion()) {
AddToWorkList(R);
+
+ if (const auto *TVR = dyn_cast<TypedValueRegion>(R)) {
+ DefinedOrUnknownSVal RVS =
+ RM.getSValBuilder().getRegionValueSymbolVal(TVR);
+ if (const MemRegion *SR = RVS.getAsRegion()) {
+ AddToWorkList(SR);
+ }
+ }
+
SymReaper.markLive(R);
// All regions captured by a block are also live.
@@ -2480,34 +2521,37 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) {
// Update the set of live symbols.
- for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
- SI!=SE; ++SI)
+ for (auto SI = V.symbol_begin(), SE = V.symbol_end(); SI != SE; ++SI) {
+ populateWorklistFromSymbol(*SI);
+
+ for (const auto *SD : ParentsToDerived[*SI])
+ populateWorklistFromSymbol(SD);
+
SymReaper.markLive(*SI);
+ }
}
-bool removeDeadBindingsWorker::UpdatePostponed() {
- // See if any postponed SymbolicRegions are actually live now, after
- // having done a scan.
- bool changed = false;
+void RemoveDeadBindingsWorker::populateWorklistFromSymbol(SymbolRef S) {
+ if (const auto *SD = dyn_cast<SymbolData>(S)) {
+ if (Loc::isLocType(SD->getType()) && !SymReaper.isLive(SD)) {
+ const SymbolicRegion *SR = RM.getRegionManager().getSymbolicRegion(SD);
- for (SmallVectorImpl<const SymbolicRegion*>::iterator
- I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
- if (const SymbolicRegion *SR = *I) {
- if (SymReaper.isLive(SR->getSymbol())) {
- changed |= AddToWorkList(SR);
- *I = nullptr;
- }
+ if (B.contains(SR))
+ AddToWorkList(SR);
+
+ const SymbolicRegion *SHR =
+ RM.getRegionManager().getSymbolicHeapRegion(SD);
+ if (B.contains(SHR))
+ AddToWorkList(SHR);
}
}
-
- return changed;
}
StoreRef RegionStoreManager::removeDeadBindings(Store store,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
RegionBindingsRef B = getRegionBindings(store);
- removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+ RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
@@ -2516,7 +2560,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
W.AddToWorkList(*I);
}
- do W.RunWorkList(); while (W.UpdatePostponed());
+ W.RunWorkList();
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
@@ -2525,24 +2569,9 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
const MemRegion *Base = I.getKey();
// If the cluster has been visited, we know the region has been marked.
- if (W.isVisited(Base))
- continue;
-
- // Remove the dead entry.
- B = B.remove(Base);
-
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(Base))
- SymReaper.maybeDead(SymR->getSymbol());
-
- // Mark all non-live symbols that this binding references as dead.
- const ClusterBindings &Cluster = I.getData();
- for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
- CI != CE; ++CI) {
- SVal X = CI.getData();
- SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
- for (; SI != SE; ++SI)
- SymReaper.maybeDead(*SI);
- }
+ // Otherwise, remove the dead entry.
+ if (!W.isVisited(Base))
+ B = B.remove(Base);
}
return StoreRef(B.asStore(), *this);
@@ -2553,7 +2582,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
//===----------------------------------------------------------------------===//
void RegionStoreManager::print(Store store, raw_ostream &OS,
- const char* nl, const char *sep) {
+ const char* nl) {
RegionBindingsRef B = getRegionBindings(store);
OS << "Store (direct and default bindings), "
<< B.asStore()
diff --git a/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp b/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
new file mode 100644
index 000000000000..2e40cc33381c
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/RetainSummaryManager.cpp
@@ -0,0 +1,1229 @@
+//== RetainSummaryManager.cpp - Summaries for reference counting --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the implementation of summaries for retain counting,
+// which implements a reference count checker for Core Foundation, Cocoa,
+// and OSObject (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/RetainSummaryManager.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+
+using namespace clang;
+using namespace ento;
+
+template <class T>
+constexpr static bool isOneOf() {
+ return false;
+}
+
+/// Helper function to check whether the class T matches any of the
+/// remaining variadic template arguments.
+template <class T, class P, class... ToCompare>
+constexpr static bool isOneOf() {
+ return std::is_same<T, P>::value || isOneOf<T, ToCompare...>();
+}
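+// For example (types chosen purely for illustration):
+//   isOneOf<int, long, unsigned, int>() evaluates to true, while
+//   isOneOf<int, long, unsigned>() evaluates to false.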
+
+namespace {
+
+/// Fake attribute class for RC* attributes.
+struct GeneralizedReturnsRetainedAttr {
+ static bool classof(const Attr *A) {
+ if (auto AA = dyn_cast<AnnotateAttr>(A))
+ return AA->getAnnotation() == "rc_ownership_returns_retained";
+ return false;
+ }
+};
+
+struct GeneralizedReturnsNotRetainedAttr {
+ static bool classof(const Attr *A) {
+ if (auto AA = dyn_cast<AnnotateAttr>(A))
+ return AA->getAnnotation() == "rc_ownership_returns_not_retained";
+ return false;
+ }
+};
+
+struct GeneralizedConsumedAttr {
+ static bool classof(const Attr *A) {
+ if (auto AA = dyn_cast<AnnotateAttr>(A))
+ return AA->getAnnotation() == "rc_ownership_consumed";
+ return false;
+ }
+};
+
+}
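+// The classof() hooks above let isa<>/dyn_cast<> (and hence Decl::hasAttr<>)
+// treat annotate-based attributes such as
+//   __attribute__((annotate("rc_ownership_returns_retained")))
+// as if they were dedicated attribute classes.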
+
+template <class T>
+Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
+ QualType QT) {
+ ObjKind K;
+ if (isOneOf<T, CFConsumedAttr, CFReturnsRetainedAttr,
+ CFReturnsNotRetainedAttr>()) {
+ if (!TrackObjCAndCFObjects)
+ return None;
+
+ K = ObjKind::CF;
+ } else if (isOneOf<T, NSConsumedAttr, NSConsumesSelfAttr,
+ NSReturnsAutoreleasedAttr, NSReturnsRetainedAttr,
+ NSReturnsNotRetainedAttr, NSConsumesSelfAttr>()) {
+
+ if (!TrackObjCAndCFObjects)
+ return None;
+
+ if (isOneOf<T, NSReturnsRetainedAttr, NSReturnsAutoreleasedAttr,
+ NSReturnsNotRetainedAttr>() &&
+ !cocoa::isCocoaObjectRef(QT))
+ return None;
+ K = ObjKind::ObjC;
+ } else if (isOneOf<T, OSConsumedAttr, OSConsumesThisAttr,
+ OSReturnsNotRetainedAttr, OSReturnsRetainedAttr,
+ OSReturnsRetainedOnZeroAttr,
+ OSReturnsRetainedOnNonZeroAttr>()) {
+ if (!TrackOSObjects)
+ return None;
+ K = ObjKind::OS;
+ } else if (isOneOf<T, GeneralizedReturnsNotRetainedAttr,
+ GeneralizedReturnsRetainedAttr,
+ GeneralizedConsumedAttr>()) {
+ K = ObjKind::Generalized;
+ } else {
+ llvm_unreachable("Unexpected attribute");
+ }
+ if (D->hasAttr<T>())
+ return K;
+ return None;
+}
+
+template <class T1, class T2, class... Others>
+Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
+ QualType QT) {
+ if (auto Out = hasAnyEnabledAttrOf<T1>(D, QT))
+ return Out;
+ return hasAnyEnabledAttrOf<T2, Others...>(D, QT);
+}
+
+const RetainSummary *
+RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
+ // Unique "simple" summaries -- those without ArgEffects.
+ if (OldSumm.isSimple()) {
+ ::llvm::FoldingSetNodeID ID;
+ OldSumm.Profile(ID);
+
+ void *Pos;
+ CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
+
+ if (!N) {
+ N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
+ new (N) CachedSummaryNode(OldSumm);
+ SimpleSummaries.InsertNode(N, Pos);
+ }
+
+ return &N->getValue();
+ }
+
+ RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(OldSumm);
+ return Summ;
+}
+
+static bool isSubclass(const Decl *D,
+ StringRef ClassName) {
+ using namespace ast_matchers;
+ DeclarationMatcher SubclassM = cxxRecordDecl(isSameOrDerivedFrom(ClassName));
+ return !(match(SubclassM, *D, D->getASTContext()).empty());
+}
+
+static bool isOSObjectSubclass(const Decl *D) {
+ return isSubclass(D, "OSObject");
+}
+
+static bool isOSObjectDynamicCast(StringRef S) {
+ return S == "safeMetaCast";
+}
+
+static bool isOSIteratorSubclass(const Decl *D) {
+ return isSubclass(D, "OSIterator");
+}
+
+static bool hasRCAnnotation(const Decl *D, StringRef rcAnnotation) {
+ for (const auto *Ann : D->specific_attrs<AnnotateAttr>()) {
+ if (Ann->getAnnotation() == rcAnnotation)
+ return true;
+ }
+ return false;
+}
+
+static bool isRetain(const FunctionDecl *FD, StringRef FName) {
+ return FName.startswith_lower("retain") || FName.endswith_lower("retain");
+}
+
+static bool isRelease(const FunctionDecl *FD, StringRef FName) {
+ return FName.startswith_lower("release") || FName.endswith_lower("release");
+}
+
+static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
+ return FName.startswith_lower("autorelease") ||
+ FName.endswith_lower("autorelease");
+}
+
+static bool isMakeCollectable(StringRef FName) {
+ return FName.contains_lower("MakeCollectable");
+}
+
+/// A function is OSObject-related if it is declared on a subclass
+/// of OSObject, or if any of its parameters points to an OSObject subclass.
+static bool isOSObjectRelated(const CXXMethodDecl *MD) {
+ if (isOSObjectSubclass(MD->getParent()))
+ return true;
+
+ for (ParmVarDecl *Param : MD->parameters()) {
+ QualType PT = Param->getType()->getPointeeType();
+ if (!PT.isNull())
+ if (CXXRecordDecl *RD = PT->getAsCXXRecordDecl())
+ if (isOSObjectSubclass(RD))
+ return true;
+ }
+
+ return false;
+}
+
+const RetainSummary *
+RetainSummaryManager::getSummaryForOSObject(const FunctionDecl *FD,
+ StringRef FName, QualType RetTy) {
+ if (RetTy->isPointerType()) {
+ const CXXRecordDecl *PD = RetTy->getPointeeType()->getAsCXXRecordDecl();
+ if (PD && isOSObjectSubclass(PD)) {
+ if (const IdentifierInfo *II = FD->getIdentifier()) {
+ if (isOSObjectDynamicCast(II->getName()))
+ return getDefaultSummary();
+
+        // All objects returned from functions whose names do *not* start
+        // with "get"/"Get", as well as all iterators, are returned at +1.
+ if ((!II->getName().startswith("get") &&
+ !II->getName().startswith("Get")) ||
+ isOSIteratorSubclass(PD)) {
+ return getOSSummaryCreateRule(FD);
+ } else {
+ return getOSSummaryGetRule(FD);
+ }
+ }
+ }
+ }
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *Parent = MD->getParent();
+ if (TrackOSObjects && Parent && isOSObjectSubclass(Parent)) {
+ if (FName == "release")
+ return getOSSummaryReleaseRule(FD);
+
+ if (FName == "retain")
+ return getOSSummaryRetainRule(FD);
+
+ if (FName == "free")
+ return getOSSummaryFreeRule(FD);
+
+ if (MD->getOverloadedOperator() == OO_New)
+ return getOSSummaryCreateRule(MD);
+ }
+ }
+
+ return nullptr;
+}
+
+const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
+ const FunctionDecl *FD,
+ StringRef FName,
+ QualType RetTy,
+ const FunctionType *FT,
+ bool &AllowAnnotations) {
+
+ ArgEffects ScratchArgs(AF.getEmptyMap());
+
+ std::string RetTyName = RetTy.getAsString();
+ if (FName == "pthread_create" || FName == "pthread_setspecific") {
+ // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
+ // This will be addressed better with IPA.
+ return getPersistentStopSummary();
+ } else if(FName == "NSMakeCollectable") {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ AllowAnnotations = false;
+ return RetTy->isObjCIdType() ? getUnarySummary(FT, DoNothing)
+ : getPersistentStopSummary();
+ } else if (FName == "CMBufferQueueDequeueAndRetain" ||
+ FName == "CMBufferQueueDequeueIfDataReadyAndRetain") {
+ // Part of: <rdar://problem/39390714>.
+ return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF),
+ ScratchArgs,
+ ArgEffect(DoNothing),
+ ArgEffect(DoNothing));
+ } else if (FName == "CFPlugInInstanceCreate") {
+ return getPersistentSummary(RetEffect::MakeNoRet(), ScratchArgs);
+ } else if (FName == "IORegistryEntrySearchCFProperty" ||
+ (RetTyName == "CFMutableDictionaryRef" &&
+ (FName == "IOBSDNameMatching" || FName == "IOServiceMatching" ||
+ FName == "IOServiceNameMatching" ||
+ FName == "IORegistryEntryIDMatching" ||
+ FName == "IOOpenFirmwarePathMatching"))) {
+ // Part of <rdar://problem/6961230>. (IOKit)
+    // This should be addressed using an API table.
+ return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF), ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "IOServiceGetMatchingService" ||
+ FName == "IOServiceGetMatchingServices") {
+ // FIXES: <rdar://problem/6326900>
+    // This should be addressed using an API table. The string matching is
+    // also a little gross, but there is no need to super optimize here.
+ ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(DecRef, ObjKind::CF));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "IOServiceAddNotification" ||
+ FName == "IOServiceAddMatchingNotification") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+    // This should be addressed using an API table.
+ ScratchArgs = AF.add(ScratchArgs, 2, ArgEffect(DecRef, ObjKind::CF));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "CVPixelBufferCreateWithBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithBytes is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ // FIXME: This function has an out parameter that returns an
+ // allocated object.
+ ScratchArgs = AF.add(ScratchArgs, 7, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "CGBitmapContextCreateWithData") {
+ // FIXES: <rdar://problem/7358899>
+ // Eventually this can be improved by recognizing that 'releaseInfo'
+ // passed to CGBitmapContextCreateWithData is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+    ScratchArgs = AF.add(ScratchArgs, 8, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF), ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+ // via a callback and doing full IPA to make sure this is done
+ // correctly.
+ ScratchArgs = AF.add(ScratchArgs, 12, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "VTCompressionSessionEncodeFrame") {
+ // The context argument passed to VTCompressionSessionEncodeFrame()
+ // is passed to the callback specified when creating the session
+ // (e.g. with VTCompressionSessionCreate()) which can release it.
+ // To account for this possibility, conservatively stop tracking
+ // the context.
+ ScratchArgs = AF.add(ScratchArgs, 5, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName == "dispatch_set_context" ||
+ FName == "xpc_connection_set_context") {
+ // <rdar://problem/11059275> - The analyzer currently doesn't have
+ // a good way to reason about the finalizer function for libdispatch.
+ // If we pass a context object that is memory managed, stop tracking it.
+ // <rdar://problem/13783514> - Same problem, but for XPC.
+ // FIXME: this hack should possibly go away once we can handle
+ // libdispatch and XPC finalizers.
+ ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+ } else if (FName.startswith("NSLog")) {
+ return getDoNothingSummary();
+ } else if (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) {
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove. (radar://11152419)
+ ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(StopTracking));
+ ScratchArgs = AF.add(ScratchArgs, 2, ArgEffect(StopTracking));
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs, ArgEffect(DoNothing),
+ ArgEffect(DoNothing));
+ }
+
+ if (RetTy->isPointerType()) {
+
+ // For CoreFoundation ('CF') types.
+ if (cocoa::isRefType(RetTy, "CF", FName)) {
+ if (isRetain(FD, FName)) {
+ // CFRetain isn't supposed to be annotated. However, this may as
+ // well be a user-made "safe" CFRetain function that is incorrectly
+ // annotated as cf_returns_retained due to lack of better options.
+ // We want to ignore such annotation.
+ AllowAnnotations = false;
+
+ return getUnarySummary(FT, IncRef);
+ } else if (isAutorelease(FD, FName)) {
+ // The headers use cf_consumed, but we can fully model CFAutorelease
+ // ourselves.
+ AllowAnnotations = false;
+
+ return getUnarySummary(FT, Autorelease);
+ } else if (isMakeCollectable(FName)) {
+ AllowAnnotations = false;
+ return getUnarySummary(FT, DoNothing);
+ } else {
+ return getCFCreateGetRuleSummary(FD);
+ }
+ }
+
+ // For CoreGraphics ('CG') and CoreVideo ('CV') types.
+ if (cocoa::isRefType(RetTy, "CG", FName) ||
+ cocoa::isRefType(RetTy, "CV", FName)) {
+ if (isRetain(FD, FName))
+ return getUnarySummary(FT, IncRef);
+ else
+ return getCFCreateGetRuleSummary(FD);
+ }
+
+ // For all other CF-style types, use the Create/Get
+ // rule for summaries but don't support Retain functions
+ // with framework-specific prefixes.
+ if (coreFoundation::isCFObjectRef(RetTy)) {
+ return getCFCreateGetRuleSummary(FD);
+ }
+
+ if (FD->hasAttr<CFAuditedTransferAttr>()) {
+ return getCFCreateGetRuleSummary(FD);
+ }
+ }
+
+ // Check for release functions, the only kind of functions that we care
+ // about that don't return a pointer type.
+ if (FName.startswith("CG") || FName.startswith("CF")) {
+ // Test for 'CGCF'.
+ FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+ if (isRelease(FD, FName))
+ return getUnarySummary(FT, DecRef);
+ else {
+ assert(ScratchArgs.isEmpty());
+ // Remaining CoreFoundation and CoreGraphics functions.
+      // We used to assume that they all strictly followed the ownership idiom
+ // and that ownership cannot be transferred. While this is technically
+ // correct, many methods allow a tracked object to escape. For example:
+ //
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFDictionaryAddValue(y, key, x);
+ // CFRelease(x);
+ // ... it is okay to use 'x' since 'y' has a reference to it
+ //
+      // We handle this and similar cases with the following heuristic. If the
+ // function name contains "InsertValue", "SetValue", "AddValue",
+ // "AppendValue", or "SetAttribute", then we assume that arguments may
+ // "escape." This means that something else holds on to the object,
+      // allowing it to be used even after its local retain count drops to 0.
+ ArgEffectKind E =
+ (StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+ ? MayEscape
+ : DoNothing;
+
+ return getPersistentSummary(RetEffect::MakeNoRet(), ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(E, ObjKind::CF));
+ }
+ }
+
+ return nullptr;
+}
+
+const RetainSummary *
+RetainSummaryManager::generateSummary(const FunctionDecl *FD,
+ bool &AllowAnnotations) {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit())
+ return getPersistentStopSummary();
+
+ const IdentifierInfo *II = FD->getIdentifier();
+
+ StringRef FName = II ? II->getName() : "";
+
+  // Strip away preceding '_'. Doing this here will affect all the checks
+ // down below.
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // Inspect the result type. Strip away any typedefs.
+ const auto *FT = FD->getType()->getAs<FunctionType>();
+ QualType RetTy = FT->getReturnType();
+
+ if (TrackOSObjects)
+ if (const RetainSummary *S = getSummaryForOSObject(FD, FName, RetTy))
+ return S;
+
+ if (TrackObjCAndCFObjects)
+ if (const RetainSummary *S =
+ getSummaryForObjCOrCFObject(FD, FName, RetTy, FT, AllowAnnotations))
+ return S;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (!(TrackOSObjects && isOSObjectRelated(MD)))
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ArgEffects(AF.getEmptyMap()),
+ ArgEffect(DoNothing),
+ ArgEffect(StopTracking),
+ ArgEffect(DoNothing));
+
+ return getDefaultSummary();
+}
+
+const RetainSummary *
+RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
+ // If we don't know what function we're calling, use our default summary.
+ if (!FD)
+ return getDefaultSummary();
+
+ // Look up a summary in our cache of FunctionDecls -> Summaries.
+ FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+ if (I != FuncSummaries.end())
+ return I->second;
+
+ // No summary? Generate one.
+ bool AllowAnnotations = true;
+ const RetainSummary *S = generateSummary(FD, AllowAnnotations);
+
+ // Annotations override defaults.
+ if (AllowAnnotations)
+ updateSummaryFromAnnotations(S, FD);
+
+ FuncSummaries[FD] = S;
+ return S;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
+ switch (E.getKind()) {
+ case DoNothing:
+ case Autorelease:
+ case DecRefBridgedTransferred:
+ case IncRef:
+ case UnretainedOutParameter:
+ case RetainedOutParameter:
+ case RetainedOutParameterOnZero:
+ case RetainedOutParameterOnNonZero:
+ case MayEscape:
+ case StopTracking:
+ case StopTrackingHard:
+ return E.withKind(StopTrackingHard);
+ case DecRef:
+ case DecRefAndStopTrackingHard:
+ return E.withKind(DecRefAndStopTrackingHard);
+ case Dealloc:
+ return E.withKind(Dealloc);
+ }
+
+ llvm_unreachable("Unknown ArgEffect kind");
+}
+
+void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
+ const CallEvent &Call) {
+ if (Call.hasNonZeroCallbackArg()) {
+ ArgEffect RecEffect =
+ getStopTrackingHardEquivalent(S->getReceiverEffect());
+ ArgEffect DefEffect =
+ getStopTrackingHardEquivalent(S->getDefaultArgEffect());
+
+ ArgEffects ScratchArgs(AF.getEmptyMap());
+ ArgEffects CustomArgEffects = S->getArgEffects();
+ for (ArgEffects::iterator I = CustomArgEffects.begin(),
+ E = CustomArgEffects.end();
+ I != E; ++I) {
+ ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
+ if (Translated.getKind() != DefEffect.getKind())
+ ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
+ }
+
+ RetEffect RE = RetEffect::MakeNoRetHard();
+
+ // Special cases where the callback argument CANNOT free the return value.
+ // This can generally only happen if we know that the callback will only be
+ // called when the return value is already being deallocated.
+ if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
+ if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
+ // When the CGBitmapContext is deallocated, the callback here will free
+ // the associated data buffer.
+ // The callback in dispatch_data_create frees the buffer, but not
+ // the data object.
+ if (Name->isStr("CGBitmapContextCreateWithData") ||
+ Name->isStr("dispatch_data_create"))
+ RE = S->getRetEffect();
+ }
+ }
+
+ S = getPersistentSummary(RE, ScratchArgs, RecEffect, DefEffect);
+ }
+
+ // Special case '[super init];' and '[self init];'
+ //
+ // Even though calling '[super init]' without assigning the result to self
+ // and checking if the parent returns 'nil' is a bad pattern, it is common.
+ // Additionally, our Self Init checker already warns about it. To avoid
+  // overwhelming the user with messages from both checkers, we model
+  // '[super init]', when it is not consumed by another expression, as if
+  // the call preserves the value of 'self'; essentially, assuming it can
+ // never fail and return 'nil'.
+  // Note that we don't want to just stop tracking the value, since we want
+  // the RetainCount checker to report leaks and use-after-free even when the
+  // SelfInit checker is turned off.
+ if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+ if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
+
+ // Check if the message is not consumed, we know it will not be used in
+ // an assignment, ex: "self = [super init]".
+ const Expr *ME = MC->getOriginExpr();
+ const LocationContext *LCtx = MC->getLocationContext();
+ ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
+ if (!PM.isConsumedExpr(ME)) {
+ RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
+ ModifiableSummaryTemplate->setReceiverEffect(ArgEffect(DoNothing));
+ ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
+ }
+ }
+ }
+}
+
+const RetainSummary *
+RetainSummaryManager::getSummary(const CallEvent &Call,
+ QualType ReceiverType) {
+ const RetainSummary *Summ;
+ switch (Call.getKind()) {
+ case CE_Function:
+ case CE_CXXMember:
+ case CE_CXXMemberOperator:
+ case CE_CXXConstructor:
+ case CE_CXXAllocator:
+ Summ = getFunctionSummary(cast_or_null<FunctionDecl>(Call.getDecl()));
+ break;
+ case CE_Block:
+ case CE_CXXDestructor:
+ // FIXME: These calls are currently unsupported.
+ return getPersistentStopSummary();
+ case CE_ObjCMessage: {
+ const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+ if (Msg.isInstanceMessage())
+ Summ = getInstanceMethodSummary(Msg, ReceiverType);
+ else
+ Summ = getClassMethodSummary(Msg);
+ break;
+ }
+ }
+
+ updateSummaryForCall(Summ, Call);
+
+ assert(Summ && "Unknown call type?");
+ return Summ;
+}
+
+
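+// Dispatch on the CoreFoundation naming convention: names following the
+// Create/Copy rule (e.g. CFArrayCreate) are assumed to return a +1 reference,
+// while Get-style accessors (e.g. CFBundleGetMainBundle) return +0. The
+// example names here merely illustrate the convention.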
+const RetainSummary *
+RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
+ if (coreFoundation::followsCreateRule(FD))
+ return getCFSummaryCreateRule(FD);
+
+ return getCFSummaryGetRule(FD);
+}
+
+bool RetainSummaryManager::isTrustedReferenceCountImplementation(
+ const FunctionDecl *FD) {
+ return hasRCAnnotation(FD, "rc_ownership_trusted_implementation");
+}
+
+Optional<RetainSummaryManager::BehaviorSummary>
+RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
+ bool &hasTrustedImplementationAnnotation) {
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return None;
+
+ StringRef FName = II->getName();
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ QualType ResultTy = CE->getCallReturnType(Ctx);
+ if (ResultTy->isObjCIdType()) {
+ if (II->isStr("NSMakeCollectable"))
+ return BehaviorSummary::Identity;
+ } else if (ResultTy->isPointerType()) {
+ // Handle: (CF|CG|CV)Retain
+ // CFAutorelease
+ // It's okay to be a little sloppy here.
+ if (FName == "CMBufferQueueDequeueAndRetain" ||
+ FName == "CMBufferQueueDequeueIfDataReadyAndRetain") {
+ // Part of: <rdar://problem/39390714>.
+ // These are not retain. They just return something and retain it.
+ return None;
+ }
+ if (cocoa::isRefType(ResultTy, "CF", FName) ||
+ cocoa::isRefType(ResultTy, "CG", FName) ||
+ cocoa::isRefType(ResultTy, "CV", FName))
+ if (isRetain(FD, FName) || isAutorelease(FD, FName) ||
+ isMakeCollectable(FName))
+ return BehaviorSummary::Identity;
+
+ // safeMetaCast is called by OSDynamicCast.
+    // We assume that OSDynamicCast either acts as an identity (the cast
+    // succeeded, so the input was non-zero), or returns zero (the cast
+    // failed, or the input was zero).
+ if (TrackOSObjects && isOSObjectDynamicCast(FName)) {
+ return BehaviorSummary::IdentityOrZero;
+ }
+
+ const FunctionDecl* FDD = FD->getDefinition();
+ if (FDD && isTrustedReferenceCountImplementation(FDD)) {
+ hasTrustedImplementationAnnotation = true;
+ return BehaviorSummary::Identity;
+ }
+ }
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *Parent = MD->getParent();
+ if (TrackOSObjects && Parent && isOSObjectSubclass(Parent))
+ if (FName == "release" || FName == "retain")
+ return BehaviorSummary::NoOp;
+ }
+
+ return None;
+}
+
+const RetainSummary *
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+ ArgEffectKind AE) {
+
+ // Unary functions have no arg effects by definition.
+ ArgEffects ScratchArgs(AF.getEmptyMap());
+
+  // Sanity check that this is *really* a unary function. It might not be,
+  // if people do weird things.
+ const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+ if (!FTP || FTP->getNumParams() != 1)
+ return getPersistentStopSummary();
+
+ ArgEffect Effect(AE, ObjKind::CF);
+
+ ScratchArgs = AF.add(ScratchArgs, 0, Effect);
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ ScratchArgs,
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+}
+
+const RetainSummary *
+RetainSummaryManager::getOSSummaryRetainRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ AF.getEmptyMap(),
+ /*ReceiverEff=*/ArgEffect(DoNothing),
+ /*DefaultEff=*/ArgEffect(DoNothing),
+ /*ThisEff=*/ArgEffect(IncRef, ObjKind::OS));
+}
+
+const RetainSummary *
+RetainSummaryManager::getOSSummaryReleaseRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ AF.getEmptyMap(),
+ /*ReceiverEff=*/ArgEffect(DoNothing),
+ /*DefaultEff=*/ArgEffect(DoNothing),
+ /*ThisEff=*/ArgEffect(DecRef, ObjKind::OS));
+}
+
+const RetainSummary *
+RetainSummaryManager::getOSSummaryFreeRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ AF.getEmptyMap(),
+ /*ReceiverEff=*/ArgEffect(DoNothing),
+ /*DefaultEff=*/ArgEffect(DoNothing),
+ /*ThisEff=*/ArgEffect(Dealloc, ObjKind::OS));
+}
+
+const RetainSummary *
+RetainSummaryManager::getOSSummaryCreateRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeOwned(ObjKind::OS),
+ AF.getEmptyMap());
+}
+
+const RetainSummary *
+RetainSummaryManager::getOSSummaryGetRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNotOwned(ObjKind::OS),
+ AF.getEmptyMap());
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF),
+ ArgEffects(AF.getEmptyMap()));
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
+ return getPersistentSummary(RetEffect::MakeNotOwned(ObjKind::CF),
+ ArgEffects(AF.getEmptyMap()),
+ ArgEffect(DoNothing), ArgEffect(DoNothing));
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+Optional<RetEffect>
+RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D) {
+ if (hasAnyEnabledAttrOf<NSReturnsRetainedAttr>(D, RetTy))
+ return ObjCAllocRetE;
+
+ if (auto K = hasAnyEnabledAttrOf<CFReturnsRetainedAttr, OSReturnsRetainedAttr,
+ GeneralizedReturnsRetainedAttr>(D, RetTy))
+ return RetEffect::MakeOwned(*K);
+
+ if (auto K = hasAnyEnabledAttrOf<
+ CFReturnsNotRetainedAttr, OSReturnsNotRetainedAttr,
+ GeneralizedReturnsNotRetainedAttr, NSReturnsNotRetainedAttr,
+ NSReturnsAutoreleasedAttr>(D, RetTy))
+ return RetEffect::MakeNotOwned(*K);
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ for (const auto *PD : MD->overridden_methods())
+ if (auto RE = getRetEffectFromAnnotations(RetTy, PD))
+ return RE;
+
+ return None;
+}
+
+/// \return Whether the chain of typedefs starting from {@code QT}
+/// has a typedef with a given name {@code Name}.
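+/// For example (a purely illustrative sketch):
+///   typedef int kern_return_t;
+///   typedef kern_return_t IOReturn;
+/// a QualType spelled 'IOReturn' has "kern_return_t" in its typedef chain,
+/// so the predicate returns true for it.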
+static bool hasTypedefNamed(QualType QT,
+ StringRef Name) {
+ while (auto *T = dyn_cast<TypedefType>(QT)) {
+ const auto &Context = T->getDecl()->getASTContext();
+ if (T->getDecl()->getIdentifier() == &Context.Idents.get(Name))
+ return true;
+ QT = T->getDecl()->getUnderlyingType();
+ }
+ return false;
+}
+
+static QualType getCallableReturnType(const NamedDecl *ND) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
+ return FD->getReturnType();
+ } else if (const auto *MD = dyn_cast<ObjCMethodDecl>(ND)) {
+ return MD->getReturnType();
+ } else {
+ llvm_unreachable("Unexpected decl");
+ }
+}
+
+bool RetainSummaryManager::applyParamAnnotationEffect(
+ const ParmVarDecl *pd, unsigned parm_idx, const NamedDecl *FD,
+ RetainSummaryTemplate &Template) {
+ QualType QT = pd->getType();
+ if (auto K =
+ hasAnyEnabledAttrOf<NSConsumedAttr, CFConsumedAttr, OSConsumedAttr,
+ GeneralizedConsumedAttr>(pd, QT)) {
+ Template->addArg(AF, parm_idx, ArgEffect(DecRef, *K));
+ return true;
+ } else if (auto K = hasAnyEnabledAttrOf<
+ CFReturnsRetainedAttr, OSReturnsRetainedAttr,
+ OSReturnsRetainedOnNonZeroAttr, OSReturnsRetainedOnZeroAttr,
+ GeneralizedReturnsRetainedAttr>(pd, QT)) {
+
+ // For OSObjects, we try to guess whether the object is created based
+ // on the return value.
+ if (K == ObjKind::OS) {
+ QualType QT = getCallableReturnType(FD);
+
+ bool HasRetainedOnZero = pd->hasAttr<OSReturnsRetainedOnZeroAttr>();
+ bool HasRetainedOnNonZero = pd->hasAttr<OSReturnsRetainedOnNonZeroAttr>();
+
+ // The usual convention is to create an object on non-zero return, but
+      // it's reversed if the typedef chain has a typedef kern_return_t,
+      // because the kReturnSuccess constant is defined as zero.
+      // The convention can be overridden by custom attributes.
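+      // As an illustration (the function below is hypothetical), a function
+      //   kern_return_t getTarget(OSObject **out
+      //                           __attribute__((os_returns_retained)));
+      // is treated as writing a +1 reference into '*out' only when it
+      // returns zero (success), unless annotated otherwise.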
+ bool SuccessOnZero =
+ HasRetainedOnZero ||
+ (hasTypedefNamed(QT, "kern_return_t") && !HasRetainedOnNonZero);
+ bool ShouldSplit = !QT.isNull() && !QT->isVoidType();
+ ArgEffectKind AK = RetainedOutParameter;
+ if (ShouldSplit && SuccessOnZero) {
+ AK = RetainedOutParameterOnZero;
+ } else if (ShouldSplit && (!SuccessOnZero || HasRetainedOnNonZero)) {
+ AK = RetainedOutParameterOnNonZero;
+ }
+ Template->addArg(AF, parm_idx, ArgEffect(AK, ObjKind::OS));
+ }
+
+ // For others:
+ // Do nothing. Retained out parameters will either point to a +1 reference
+ // or NULL, but the way you check for failure differs depending on the
+ // API. Consequently, we don't have a good way to track them yet.
+ return true;
+ } else if (auto K = hasAnyEnabledAttrOf<CFReturnsNotRetainedAttr,
+ OSReturnsNotRetainedAttr,
+ GeneralizedReturnsNotRetainedAttr>(
+ pd, QT)) {
+ Template->addArg(AF, parm_idx, ArgEffect(UnretainedOutParameter, *K));
+ return true;
+ }
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ for (const auto *OD : MD->overridden_methods()) {
+ const ParmVarDecl *OP = OD->parameters()[parm_idx];
+ if (applyParamAnnotationEffect(OP, parm_idx, OD, Template))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const FunctionDecl *FD) {
+ if (!FD)
+ return;
+
+ assert(Summ && "Must have a summary to add annotations to.");
+ RetainSummaryTemplate Template(Summ, *this);
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (auto pi = FD->param_begin(),
+ pe = FD->param_end(); pi != pe; ++pi, ++parm_idx)
+ applyParamAnnotationEffect(*pi, parm_idx, FD, Template);
+
+ QualType RetTy = FD->getReturnType();
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
+ Template->setRetEffect(*RetE);
+
+ if (hasAnyEnabledAttrOf<OSConsumesThisAttr>(FD, RetTy))
+ Template->setThisEffect(ArgEffect(DecRef, ObjKind::OS));
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const ObjCMethodDecl *MD) {
+ if (!MD)
+ return;
+
+ assert(Summ && "Must have a valid summary to add annotations to");
+ RetainSummaryTemplate Template(Summ, *this);
+
+ // Effects on the receiver.
+ if (hasAnyEnabledAttrOf<NSConsumesSelfAttr>(MD, MD->getReturnType()))
+ Template->setReceiverEffect(ArgEffect(DecRef, ObjKind::ObjC));
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (auto pi = MD->param_begin(), pe = MD->param_end(); pi != pe;
+ ++pi, ++parm_idx)
+ applyParamAnnotationEffect(*pi, parm_idx, MD, Template);
+
+ QualType RetTy = MD->getReturnType();
+ if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
+ Template->setRetEffect(*RetE);
+}
+
+const RetainSummary *
+RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy) {
+ // Any special effects?
+ ArgEffect ReceiverEff = ArgEffect(DoNothing, ObjKind::ObjC);
+ RetEffect ResultEff = RetEffect::MakeNoRet();
+
+ // Check the method family, and apply any default annotations.
+ switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
+ case OMF_None:
+ case OMF_initialize:
+ case OMF_performSelector:
+ // Assume all Objective-C methods follow Cocoa Memory Management rules.
+ // FIXME: Does the non-threaded performSelector family really belong here?
+ // The selector could be, say, @selector(copy).
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = RetEffect::MakeNotOwned(ObjKind::ObjC);
+ else if (coreFoundation::isCFObjectRef(RetTy)) {
+ // ObjCMethodDecl currently doesn't consider CF objects as valid return
+ // values for alloc, new, copy, or mutableCopy, so we have to
+ // double-check with the selector. This is ugly, but there aren't that
+ // many Objective-C methods that return CF objects, right?
+ if (MD) {
+ switch (S.getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ ResultEff = RetEffect::MakeOwned(ObjKind::CF);
+ break;
+ default:
+ ResultEff = RetEffect::MakeNotOwned(ObjKind::CF);
+ break;
+ }
+ } else {
+ ResultEff = RetEffect::MakeNotOwned(ObjKind::CF);
+ }
+ }
+ break;
+ case OMF_init:
+ ResultEff = ObjCInitRetE;
+ ReceiverEff = ArgEffect(DecRef, ObjKind::ObjC);
+ break;
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = ObjCAllocRetE;
+ else if (coreFoundation::isCFObjectRef(RetTy))
+ ResultEff = RetEffect::MakeOwned(ObjKind::CF);
+ break;
+ case OMF_autorelease:
+ ReceiverEff = ArgEffect(Autorelease, ObjKind::ObjC);
+ break;
+ case OMF_retain:
+ ReceiverEff = ArgEffect(IncRef, ObjKind::ObjC);
+ break;
+ case OMF_release:
+ ReceiverEff = ArgEffect(DecRef, ObjKind::ObjC);
+ break;
+ case OMF_dealloc:
+ ReceiverEff = ArgEffect(Dealloc, ObjKind::ObjC);
+ break;
+ case OMF_self:
+ // -self is handled specially by the ExprEngine to propagate the receiver.
+ break;
+ case OMF_retainCount:
+ case OMF_finalize:
+ // These methods don't return objects.
+ break;
+ }
+
+ // If one of the arguments in the selector has the keyword 'delegate' we
+ // should stop tracking the reference count for the receiver. This is
+ // because the reference count is quite possibly handled by a delegate
+ // method.
+ if (S.isKeywordSelector()) {
+ for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
+ StringRef Slot = S.getNameForSlot(i);
+ if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
+ if (ResultEff == ObjCInitRetE)
+ ResultEff = RetEffect::MakeNoRetHard();
+ else
+ ReceiverEff = ArgEffect(StopTrackingHard, ObjKind::ObjC);
+ }
+ }
+ }
+
+ if (ReceiverEff.getKind() == DoNothing &&
+ ResultEff.getKind() == RetEffect::NoRet)
+ return getDefaultSummary();
+
+ return getPersistentSummary(ResultEff, ArgEffects(AF.getEmptyMap()),
+ ArgEffect(ReceiverEff), ArgEffect(MayEscape));
+}
+
+const RetainSummary *RetainSummaryManager::getInstanceMethodSummary(
+ const ObjCMethodCall &Msg,
+ QualType ReceiverType) {
+ const ObjCInterfaceDecl *ReceiverClass = nullptr;
+
+ // We do better tracking of the type of the object than the core ExprEngine.
+ // See if we have its type in our private state.
+ if (!ReceiverType.isNull())
+ if (const auto *PT = ReceiverType->getAs<ObjCObjectPointerType>())
+ ReceiverClass = PT->getInterfaceDecl();
+
+ // If we don't know what kind of object this is, fall back to its static type.
+ if (!ReceiverClass)
+ ReceiverClass = Msg.getReceiverInterface();
+
+ // FIXME: The receiver could be a reference to a class, meaning that
+ // we should use the class method.
+ // id x = [NSObject class];
+ // [x performSelector:... withObject:... afterDelay:...];
+ Selector S = Msg.getSelector();
+ const ObjCMethodDecl *Method = Msg.getDecl();
+ if (!Method && ReceiverClass)
+ Method = ReceiverClass->getInstanceMethod(S);
+
+ return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
+ ObjCMethodSummaries);
+}
+
+const RetainSummary *
+RetainSummaryManager::getMethodSummary(Selector S,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD, QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries) {
+
+ // Objective-C method summaries are only applicable to ObjC and CF objects.
+ if (!TrackObjCAndCFObjects)
+ return getDefaultSummary();
+
+ // Look up a summary in our summary cache.
+ const RetainSummary *Summ = CachedSummaries.find(ID, S);
+
+ if (!Summ) {
+ Summ = getStandardMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(Summ, MD);
+
+ // Memoize the summary.
+ CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
+ }
+
+ return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+ ArgEffects ScratchArgs = AF.getEmptyMap();
+
+ // Create the [NSAssertionHandler currentHandler] summary.
+ addClassMethSummary("NSAssertionHandler", "currentHandler",
+ getPersistentSummary(RetEffect::MakeNotOwned(ObjKind::ObjC),
+ ScratchArgs));
+
+ // Create the [NSAutoreleasePool addObject:] summary.
+ ScratchArgs = AF.add(ScratchArgs, 0, ArgEffect(Autorelease));
+ addClassMethSummary("NSAutoreleasePool", "addObject",
+ getPersistentSummary(RetEffect::MakeNoRet(), ScratchArgs,
+ ArgEffect(DoNothing),
+ ArgEffect(Autorelease)));
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+ ArgEffects ScratchArgs = AF.getEmptyMap();
+ // Create the "init" selector. It just acts as a pass-through for the
+ // receiver.
+ const RetainSummary *InitSumm = getPersistentSummary(
+ ObjCInitRetE, ScratchArgs, ArgEffect(DecRef, ObjKind::ObjC));
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+ // awakeAfterUsingCoder: behaves basically like an 'init' method. It
+ // claims the receiver and returns a retained object.
+ addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+ InitSumm);
+
+ // The next methods are allocators.
+ const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE,
+ ScratchArgs);
+ const RetainSummary *CFAllocSumm =
+ getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF), ScratchArgs);
+
+ // Create the "retain" selector.
+ RetEffect NoRet = RetEffect::MakeNoRet();
+ const RetainSummary *Summ = getPersistentSummary(
+ NoRet, ScratchArgs, ArgEffect(IncRef, ObjKind::ObjC));
+ addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+ // Create the "release" selector.
+ Summ = getPersistentSummary(NoRet, ScratchArgs,
+ ArgEffect(DecRef, ObjKind::ObjC));
+ addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+ // Create the -dealloc summary.
+ Summ = getPersistentSummary(NoRet, ScratchArgs, ArgEffect(Dealloc,
+ ObjKind::ObjC));
+ addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+ // Create the "autorelease" selector.
+ Summ = getPersistentSummary(NoRet, ScratchArgs, ArgEffect(Autorelease,
+ ObjKind::ObjC));
+ addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+ // For NSWindow, allocated objects are (initially) self-owned.
+ // FIXME: For now we opt for false negatives with NSWindow, as these objects
+ // self-own themselves. However, they only do this once they are displayed.
+ // Thus, we need to track an NSWindow's display status.
+ // This is tracked in <rdar://problem/6062711>.
+ // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+ const RetainSummary *NoTrackYet =
+ getPersistentSummary(RetEffect::MakeNoRet(), ScratchArgs,
+ ArgEffect(StopTracking), ArgEffect(StopTracking));
+
+ addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+ // For NSPanel (which subclasses NSWindow), allocated objects are not
+ // self-owned.
+ // FIXME: For now we don't track NSPanel objects for the same reason
+ // as for NSWindow objects.
+ addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+ // For NSNull, objects returned by +null are singletons that ignore
+ // retain/release semantics. Just don't track them.
+ // <rdar://problem/12858915>
+ addClassMethSummary("NSNull", "null", NoTrackYet);
+
+ // Don't track allocated autorelease pools, as it is okay to prematurely
+ // exit a method.
+ addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+ addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
+ addClassMethSummary("NSAutoreleasePool", "new", NoTrackYet);
+
+ // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:
+ addInstMethSummary("QCRenderer", AllocSumm, "createSnapshotImageOfType");
+ addInstMethSummary("QCView", AllocSumm, "createSnapshotImageOfType");
+
+ // Create summaries for CIContext, 'createCGImage' and
+ // 'createCGLayerWithSize'. These objects are CF objects, and are not
+ // automatically garbage collected.
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect");
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect",
+ "format", "colorSpace");
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info");
+}
+
+CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
+ ASTContext &Ctx = MD->getASTContext();
+ LangOptions L = Ctx.getLangOpts();
+ RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
+ /*TrackNSAndCFObjects=*/true,
+ /*TrackOSObjects=*/false);
+ const RetainSummary *S = M.getMethodSummary(MD);
+ CallEffects CE(S->getRetEffect(), S->getReceiverEffect());
+ unsigned N = MD->param_size();
+ for (unsigned i = 0; i < N; ++i) {
+ CE.Args.push_back(S->getArg(i));
+ }
+ return CE;
+}
+
+CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
+ ASTContext &Ctx = FD->getASTContext();
+ LangOptions L = Ctx.getLangOpts();
+ RetainSummaryManager M(Ctx, L.ObjCAutoRefCount,
+ /*TrackNSAndCFObjects=*/true,
+ /*TrackOSObjects=*/false);
+ const RetainSummary *S = M.getFunctionSummary(FD);
+ CallEffects CE(S->getRetEffect());
+ unsigned N = FD->param_size();
+ for (unsigned i = 0; i < N; ++i) {
+ CE.Args.push_back(S->getArg(i));
+ }
+ return CE;
+}
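
A minimal standalone sketch of the out-parameter convention implemented in applyParamAnnotationEffect above: by default an OS object out-parameter is written on non-zero return, a kern_return_t in the typedef chain flips that (its success value is zero), and explicit on-zero/on-non-zero attributes take precedence. The enum and helper names below are illustrative, not analyzer API.

    #include <cstdio>

    enum class OutParamEffect { RetainedOnZero, RetainedOnNonZero };

    // Mirrors the SuccessOnZero computation above.
    static OutParamEffect guessEffect(bool typedefChainHasKernReturnT,
                                      bool hasRetainedOnZeroAttr,
                                      bool hasRetainedOnNonZeroAttr) {
      bool SuccessOnZero =
          hasRetainedOnZeroAttr ||
          (typedefChainHasKernReturnT && !hasRetainedOnNonZeroAttr);
      return SuccessOnZero ? OutParamEffect::RetainedOnZero
                           : OutParamEffect::RetainedOnNonZero;
    }

    int main() {
      // A kern_return_t-returning creator is assumed to write its
      // out-parameter on success, i.e. on a zero return value.
      bool OnZero = guessEffect(/*kern_return_t=*/true, false, false) ==
                    OutParamEffect::RetainedOnZero;
      std::printf("retained on zero: %s\n", OnZero ? "yes" : "no");
    }
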
diff --git a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
deleted file mode 100644
index d379562bf325..000000000000
--- a/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-//== SMTConstraintManager.cpp -----------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-
-using namespace clang;
-using namespace ento;
-
-ProgramStateRef SMTConstraintManager::assumeSym(ProgramStateRef State,
- SymbolRef Sym,
- bool Assumption) {
- ASTContext &Ctx = getBasicVals().getContext();
-
- QualType RetTy;
- bool hasComparison;
-
- SMTExprRef Exp = Solver->getExpr(Ctx, Sym, &RetTy, &hasComparison);
-
- // Create zero comparison for implicit boolean cast, with reversed assumption
- if (!hasComparison && !RetTy->isBooleanType())
- return assumeExpr(State, Sym,
- Solver->getZeroExpr(Ctx, Exp, RetTy, !Assumption));
-
- return assumeExpr(State, Sym, Assumption ? Exp : Solver->mkNot(Exp));
-}
-
-ProgramStateRef SMTConstraintManager::assumeSymInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, bool InRange) {
- ASTContext &Ctx = getBasicVals().getContext();
- return assumeExpr(State, Sym,
- Solver->getRangeExpr(Ctx, Sym, From, To, InRange));
-}
-
-ProgramStateRef
-SMTConstraintManager::assumeSymUnsupported(ProgramStateRef State, SymbolRef Sym,
- bool Assumption) {
- // Skip anything that is unsupported
- return State;
-}
-
-ConditionTruthVal SMTConstraintManager::checkNull(ProgramStateRef State,
- SymbolRef Sym) {
- ASTContext &Ctx = getBasicVals().getContext();
-
- QualType RetTy;
- // The expression may be casted, so we cannot call getZ3DataExpr() directly
- SMTExprRef VarExp = Solver->getExpr(Ctx, Sym, &RetTy);
- SMTExprRef Exp = Solver->getZeroExpr(Ctx, VarExp, RetTy, /*Assumption=*/true);
-
- // Negate the constraint
- SMTExprRef NotExp =
- Solver->getZeroExpr(Ctx, VarExp, RetTy, /*Assumption=*/false);
-
- Solver->reset();
- addStateConstraints(State);
-
- Solver->push();
- Solver->addConstraint(Exp);
- ConditionTruthVal isSat = Solver->check();
-
- Solver->pop();
- Solver->addConstraint(NotExp);
- ConditionTruthVal isNotSat = Solver->check();
-
- // Zero is the only possible solution
- if (isSat.isConstrainedTrue() && isNotSat.isConstrainedFalse())
- return true;
-
- // Zero is not a solution
- if (isSat.isConstrainedFalse() && isNotSat.isConstrainedTrue())
- return false;
-
- // Zero may be a solution
- return ConditionTruthVal();
-}
-
-const llvm::APSInt *SMTConstraintManager::getSymVal(ProgramStateRef State,
- SymbolRef Sym) const {
- BasicValueFactory &BVF = getBasicVals();
- ASTContext &Ctx = BVF.getContext();
-
- if (const SymbolData *SD = dyn_cast<SymbolData>(Sym)) {
- QualType Ty = Sym->getType();
- assert(!Ty->isRealFloatingType());
- llvm::APSInt Value(Ctx.getTypeSize(Ty),
- !Ty->isSignedIntegerOrEnumerationType());
-
- SMTExprRef Exp =
- Solver->fromData(SD->getSymbolID(), Ty, Ctx.getTypeSize(Ty));
-
- Solver->reset();
- addStateConstraints(State);
-
- // Constraints are unsatisfiable
- ConditionTruthVal isSat = Solver->check();
- if (!isSat.isConstrainedTrue())
- return nullptr;
-
- // Model does not assign interpretation
- if (!Solver->getInterpretation(Exp, Value))
- return nullptr;
-
- // A value has been obtained, check if it is the only value
- SMTExprRef NotExp = Solver->fromBinOp(
- Exp, BO_NE,
- Ty->isBooleanType() ? Solver->fromBoolean(Value.getBoolValue())
- : Solver->fromAPSInt(Value),
- false);
-
- Solver->addConstraint(NotExp);
-
- ConditionTruthVal isNotSat = Solver->check();
- if (isNotSat.isConstrainedTrue())
- return nullptr;
-
- // This is the only solution, store it
- return &BVF.getValue(Value);
- }
-
- if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym)) {
- SymbolRef CastSym = SC->getOperand();
- QualType CastTy = SC->getType();
- // Skip the void type
- if (CastTy->isVoidType())
- return nullptr;
-
- const llvm::APSInt *Value;
- if (!(Value = getSymVal(State, CastSym)))
- return nullptr;
- return &BVF.Convert(SC->getType(), *Value);
- }
-
- if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- const llvm::APSInt *LHS, *RHS;
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE)) {
- LHS = getSymVal(State, SIE->getLHS());
- RHS = &SIE->getRHS();
- } else if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE)) {
- LHS = &ISE->getLHS();
- RHS = getSymVal(State, ISE->getRHS());
- } else if (const SymSymExpr *SSM = dyn_cast<SymSymExpr>(BSE)) {
- // Early termination to avoid expensive call
- LHS = getSymVal(State, SSM->getLHS());
- RHS = LHS ? getSymVal(State, SSM->getRHS()) : nullptr;
- } else {
- llvm_unreachable("Unsupported binary expression to get symbol value!");
- }
-
- if (!LHS || !RHS)
- return nullptr;
-
- llvm::APSInt ConvertedLHS, ConvertedRHS;
- QualType LTy, RTy;
- std::tie(ConvertedLHS, LTy) = Solver->fixAPSInt(Ctx, *LHS);
- std::tie(ConvertedRHS, RTy) = Solver->fixAPSInt(Ctx, *RHS);
- Solver->doIntTypeConversion<llvm::APSInt, &SMTSolver::castAPSInt>(
- Ctx, ConvertedLHS, LTy, ConvertedRHS, RTy);
- return BVF.evalAPSInt(BSE->getOpcode(), ConvertedLHS, ConvertedRHS);
- }
-
- llvm_unreachable("Unsupported expression to get symbol value!");
-}
-
-ConditionTruthVal
-SMTConstraintManager::checkModel(ProgramStateRef State,
- const SMTExprRef &Exp) const {
- Solver->reset();
- Solver->addConstraint(Exp);
- addStateConstraints(State);
- return Solver->check();
-}
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index f292dca8e99f..6c0d487c8a87 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -271,8 +271,8 @@ DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
/// Return a memory region for the 'this' object reference.
loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
const StackFrameContext *SFC) {
- return loc::MemRegionVal(getRegionManager().
- getCXXThisRegion(D->getThisType(getContext()), SFC));
+ return loc::MemRegionVal(
+ getRegionManager().getCXXThisRegion(D->getThisType(), SFC));
}
/// Return a memory region for the 'this' object reference.
@@ -362,9 +362,9 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return None;
ASTContext &Ctx = getContext();
- llvm::APSInt Result;
+ Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, Ctx))
- return makeIntVal(Result);
+ return makeIntVal(Result.Val.getInt());
if (Loc::isLocType(E->getType()))
if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
@@ -375,8 +375,7 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
}
}
-SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
- BinaryOperator::Opcode Op,
+SVal SValBuilder::makeSymExprValNN(BinaryOperator::Opcode Op,
NonLoc LHS, NonLoc RHS,
QualType ResultTy) {
const SymExpr *symLHS = LHS.getAsSymExpr();
@@ -385,8 +384,8 @@ SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
// TODO: When the Max Complexity is reached, we should conjure a symbol
// instead of generating an Unknown value and propagate the taint info to it.
const unsigned MaxComp = StateMgr.getOwningEngine()
- ->getAnalysisManager()
- .options.getMaxSymbolComplexity();
+ .getAnalysisManager()
+ .options.MaxSymbolComplexity;
if (symLHS && symRHS &&
(symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index 559ca2c9840d..933c5c330072 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -85,7 +85,7 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
- return X->getLoc().getAsLocSymbol();
+ return X->getLoc().getAsLocSymbol(IncludeBaseRegions);
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
const MemRegion *R = X->getRegion();
@@ -171,6 +171,10 @@ const TypedValueRegion *nonloc::LazyCompoundVal::getRegion() const {
return static_cast<const LazyCompoundValData*>(Data)->getRegion();
}
+bool nonloc::PointerToMember::isNullMemberPointer() const {
+ return getPTMData().isNull();
+}
+
const DeclaratorDecl *nonloc::PointerToMember::getDecl() const {
const auto PTMD = this->getPTMData();
if (PTMD.isNull())
diff --git a/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
new file mode 100644
index 000000000000..fecbc0001079
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -0,0 +1,349 @@
+//===--- SarifDiagnostics.cpp - Sarif Diagnostics for Paths -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SarifDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Path.h"
+
+using namespace llvm;
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SarifDiagnostics : public PathDiagnosticConsumer {
+ std::string OutputFile;
+
+public:
+ SarifDiagnostics(AnalyzerOptions &, const std::string &Output)
+ : OutputFile(Output) {}
+ ~SarifDiagnostics() override = default;
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ FilesMade *FM) override;
+
+ StringRef getName() const override { return "SarifDiagnostics"; }
+ PathGenerationScheme getGenerationScheme() const override { return Minimal; }
+ bool supportsLogicalOpControlFlow() const override { return true; }
+ bool supportsCrossFileDiagnostics() const override { return true; }
+};
+} // end anonymous namespace
+
+void ento::createSarifDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+ PathDiagnosticConsumers &C,
+ const std::string &Output,
+ const Preprocessor &) {
+ C.push_back(new SarifDiagnostics(AnalyzerOpts, Output));
+}
+
+static StringRef getFileName(const FileEntry &FE) {
+ StringRef Filename = FE.tryGetRealPathName();
+ if (Filename.empty())
+ Filename = FE.getName();
+ return Filename;
+}
+
+static std::string percentEncodeURICharacter(char C) {
+ // Per RFC 3986, alphanumeric characters and this handful of
+ // punctuation characters are unreserved in the path component and
+ // can be written out directly. Otherwise, percent-encode the
+ // character and write the encoded form instead of the reserved
+ // character.
+ if (llvm::isAlnum(C) ||
+ StringRef::npos != StringRef("-._~:@!$&'()*+,;=").find(C))
+ return std::string(&C, 1);
+ return "%" + llvm::toHex(StringRef(&C, 1));
+}
+
+static std::string fileNameToURI(StringRef Filename) {
+ llvm::SmallString<32> Ret = StringRef("file://");
+
+ // Get the root name to see if it has a URI authority.
+ StringRef Root = sys::path::root_name(Filename);
+ if (Root.startswith("//")) {
+ // There is an authority, so add it to the URI.
+ Ret += Root.drop_front(2).str();
+ } else if (!Root.empty()) {
+ // There is no authority, so end the component and add the root to the URI.
+ Ret += Twine("/" + Root).str();
+ }
+
+ auto Iter = sys::path::begin(Filename), End = sys::path::end(Filename);
+ assert(Iter != End && "Expected there to be a non-root path component.");
+ // Add the rest of the path components, encoding any reserved characters;
+ // we skip past the first path component, as it was handled above.
+ std::for_each(++Iter, End, [&Ret](StringRef Component) {
+ // For reasons unknown to me, we may get a backslash with Windows native
+ // paths for the initial backslash following the drive component, which
+ // we need to ignore as a URI path part.
+ if (Component == "\\")
+ return;
+
+ // Add the separator between the previous path part and the one being
+ // currently processed.
+ Ret += "/";
+
+ // URI encode the part.
+ for (char C : Component) {
+ Ret += percentEncodeURICharacter(C);
+ }
+ });
+
+ return Ret.str().str();
+}
+
+static json::Object createFileLocation(const FileEntry &FE) {
+ return json::Object{{"uri", fileNameToURI(getFileName(FE))}};
+}
+
+static json::Object createFile(const FileEntry &FE) {
+ return json::Object{{"fileLocation", createFileLocation(FE)},
+ {"roles", json::Array{"resultFile"}},
+ {"length", FE.getSize()},
+ {"mimeType", "text/plain"}};
+}
+
+static json::Object createFileLocation(const FileEntry &FE,
+ json::Array &Files) {
+ std::string FileURI = fileNameToURI(getFileName(FE));
+
+ // See if the Files array contains this URI already. If it does not, create
+ // a new file object to add to the array.
+ auto I = llvm::find_if(Files, [&](const json::Value &File) {
+ if (const json::Object *Obj = File.getAsObject()) {
+ if (const json::Object *FileLoc = Obj->getObject("fileLocation")) {
+ Optional<StringRef> URI = FileLoc->getString("uri");
+ return URI && URI->equals(FileURI);
+ }
+ }
+ return false;
+ });
+
+ // Calculate the index within the file location array so it can be stored in
+ // the JSON object.
+ auto Index = static_cast<unsigned>(std::distance(Files.begin(), I));
+ if (I == Files.end())
+ Files.push_back(createFile(FE));
+
+ return json::Object{{"uri", FileURI}, {"fileIndex", Index}};
+}
+
+static json::Object createTextRegion(SourceRange R, const SourceManager &SM) {
+ return json::Object{
+ {"startLine", SM.getExpansionLineNumber(R.getBegin())},
+ {"endLine", SM.getExpansionLineNumber(R.getEnd())},
+ {"startColumn", SM.getExpansionColumnNumber(R.getBegin())},
+ {"endColumn", SM.getExpansionColumnNumber(R.getEnd())}};
+}
+
+static json::Object createPhysicalLocation(SourceRange R, const FileEntry &FE,
+ const SourceManager &SMgr,
+ json::Array &Files) {
+ return json::Object{{{"fileLocation", createFileLocation(FE, Files)},
+ {"region", createTextRegion(R, SMgr)}}};
+}
+
+enum class Importance { Important, Essential, Unimportant };
+
+static StringRef importanceToStr(Importance I) {
+ switch (I) {
+ case Importance::Important:
+ return "important";
+ case Importance::Essential:
+ return "essential";
+ case Importance::Unimportant:
+ return "unimportant";
+ }
+ llvm_unreachable("Fully covered switch is not so fully covered");
+}
+
+static json::Object createThreadFlowLocation(json::Object &&Location,
+ Importance I) {
+ return json::Object{{"location", std::move(Location)},
+ {"importance", importanceToStr(I)}};
+}
+
+static json::Object createMessage(StringRef Text) {
+ return json::Object{{"text", Text.str()}};
+}
+
+static json::Object createLocation(json::Object &&PhysicalLocation,
+ StringRef Message = "") {
+ json::Object Ret{{"physicalLocation", std::move(PhysicalLocation)}};
+ if (!Message.empty())
+ Ret.insert({"message", createMessage(Message)});
+ return Ret;
+}
+
+static Importance calculateImportance(const PathDiagnosticPiece &Piece) {
+ switch (Piece.getKind()) {
+ case PathDiagnosticPiece::Kind::Call:
+ case PathDiagnosticPiece::Kind::Macro:
+ case PathDiagnosticPiece::Kind::Note:
+ // FIXME: What should be reported here?
+ break;
+ case PathDiagnosticPiece::Kind::Event:
+ return Piece.getTagStr() == "ConditionBRVisitor" ? Importance::Important
+ : Importance::Essential;
+ case PathDiagnosticPiece::Kind::ControlFlow:
+ return Importance::Unimportant;
+ }
+ return Importance::Unimportant;
+}
+
+static json::Object createThreadFlow(const PathPieces &Pieces,
+ json::Array &Files) {
+ const SourceManager &SMgr = Pieces.front()->getLocation().getManager();
+ json::Array Locations;
+ for (const auto &Piece : Pieces) {
+ const PathDiagnosticLocation &P = Piece->getLocation();
+ Locations.push_back(createThreadFlowLocation(
+ createLocation(createPhysicalLocation(P.asRange(),
+ *P.asLocation().getFileEntry(),
+ SMgr, Files),
+ Piece->getString()),
+ calculateImportance(*Piece)));
+ }
+ return json::Object{{"locations", std::move(Locations)}};
+}
+
+static json::Object createCodeFlow(const PathPieces &Pieces,
+ json::Array &Files) {
+ return json::Object{
+ {"threadFlows", json::Array{createThreadFlow(Pieces, Files)}}};
+}
+
+static json::Object createTool() {
+ return json::Object{{"name", "clang"},
+ {"fullName", "clang static analyzer"},
+ {"language", "en-US"},
+ {"version", getClangFullVersion()}};
+}
+
+static json::Object createResult(const PathDiagnostic &Diag, json::Array &Files,
+ const StringMap<unsigned> &RuleMapping) {
+ const PathPieces &Path = Diag.path.flatten(false);
+ const SourceManager &SMgr = Path.front()->getLocation().getManager();
+
+ auto Iter = RuleMapping.find(Diag.getCheckName());
+ assert(Iter != RuleMapping.end() && "Rule ID is not in the array index map?");
+
+ return json::Object{
+ {"message", createMessage(Diag.getVerboseDescription())},
+ {"codeFlows", json::Array{createCodeFlow(Path, Files)}},
+ {"locations",
+ json::Array{createLocation(createPhysicalLocation(
+ Diag.getLocation().asRange(),
+ *Diag.getLocation().asLocation().getFileEntry(), SMgr, Files))}},
+ {"ruleIndex", Iter->getValue()},
+ {"ruleId", Diag.getCheckName()}};
+}
+
+static StringRef getRuleDescription(StringRef CheckName) {
+ return llvm::StringSwitch<StringRef>(CheckName)
+#define GET_CHECKERS
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+ .Case(FULLNAME, HELPTEXT)
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+ ;
+}
+
+static StringRef getRuleHelpURIStr(StringRef CheckName) {
+ return llvm::StringSwitch<StringRef>(CheckName)
+#define GET_CHECKERS
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+ .Case(FULLNAME, DOC_URI)
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+ ;
+}
+
+static json::Object createRule(const PathDiagnostic &Diag) {
+ StringRef CheckName = Diag.getCheckName();
+ json::Object Ret{
+ {"fullDescription", createMessage(getRuleDescription(CheckName))},
+ {"name", createMessage(CheckName)},
+ {"id", CheckName}};
+
+ std::string RuleURI = getRuleHelpURIStr(CheckName);
+ if (!RuleURI.empty())
+ Ret["helpUri"] = RuleURI;
+
+ return Ret;
+}
+
+static json::Array createRules(std::vector<const PathDiagnostic *> &Diags,
+ StringMap<unsigned> &RuleMapping) {
+ json::Array Rules;
+ llvm::StringSet<> Seen;
+
+ llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+ StringRef RuleID = D->getCheckName();
+ std::pair<llvm::StringSet<>::iterator, bool> P = Seen.insert(RuleID);
+ if (P.second) {
+ RuleMapping[RuleID] = Rules.size(); // Maps RuleID to an Array Index.
+ Rules.push_back(createRule(*D));
+ }
+ });
+
+ return Rules;
+}
+
+static json::Object createResources(std::vector<const PathDiagnostic *> &Diags,
+ StringMap<unsigned> &RuleMapping) {
+ return json::Object{{"rules", createRules(Diags, RuleMapping)}};
+}
+
+static json::Object createRun(std::vector<const PathDiagnostic *> &Diags) {
+ json::Array Results, Files;
+ StringMap<unsigned> RuleMapping;
+ json::Object Resources = createResources(Diags, RuleMapping);
+
+ llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+ Results.push_back(createResult(*D, Files, RuleMapping));
+ });
+
+ return json::Object{{"tool", createTool()},
+ {"resources", std::move(Resources)},
+ {"results", std::move(Results)},
+ {"files", std::move(Files)}};
+}
+
+void SarifDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags, FilesMade *) {
+ // We currently overwrite the file if it already exists. However, it may be
+ // useful to add a feature someday that allows the user to append a run to an
+ // existing SARIF file. One danger from that approach is that the size of the
+ // file can become large very quickly, so decoding into JSON to append a run
+ // may be an expensive operation.
+ std::error_code EC;
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
+ return;
+ }
+ json::Object Sarif{
+ {"$schema",
+ "http://json.schemastore.org/sarif-2.0.0-csd.2.beta.2018-11-28"},
+ {"version", "2.0.0-csd.2.beta.2018-11-28"},
+ {"runs", json::Array{createRun(Diags)}}};
+ OS << llvm::formatv("{0:2}", json::Value(std::move(Sarif)));
+}
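
For reference, a self-contained sketch of how the llvm::json pieces used by SarifDiagnostics compose and get pretty-printed; the field values are placeholders rather than a complete SARIF run.

    #include "llvm/Support/FormatVariadic.h"
    #include "llvm/Support/JSON.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      // A run object with empty files/results, nested into the top-level
      // object the same way createRun() is nested above.
      llvm::json::Object Run{{"tool", llvm::json::Object{{"name", "clang"}}},
                             {"files", llvm::json::Array{}},
                             {"results", llvm::json::Array{}}};
      llvm::json::Object Sarif{
          {"version", "2.0.0-csd.2.beta.2018-11-28"},
          {"runs", llvm::json::Array{std::move(Run)}}};
      // "{0:2}" pretty-prints the JSON value with two-space indentation.
      llvm::outs() << llvm::formatv("{0:2}",
                                    llvm::json::Value(std::move(Sarif)))
                   << '\n';
    }
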
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 62c54fc956a9..fc57cecac9cb 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -454,12 +454,12 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
QualType SingleTy;
auto &Opts =
- StateMgr.getOwningEngine()->getAnalysisManager().getAnalyzerOptions();
+ StateMgr.getOwningEngine().getAnalysisManager().getAnalyzerOptions();
// FIXME: After putting complexity threshold to the symbols we can always
// rearrange additive operations but rearrange comparisons only if
// option is set.
- if(!Opts.shouldAggressivelySimplifyBinaryOperation())
+ if(!Opts.ShouldAggressivelySimplifyBinaryOperation)
return None;
SymbolRef LSym = Lhs.getAsSymbol();
@@ -475,9 +475,6 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
SingleTy = ResultTy;
if (LSym->getType() != SingleTy)
return None;
- // Substracting unsigned integers is a nightmare.
- if (!SingleTy->isSignedIntegerOrEnumerationType())
- return None;
} else {
// Don't rearrange other operations.
return None;
@@ -485,6 +482,10 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
assert(!SingleTy.isNull() && "We should have figured out the type by now!");
+ // Rearrange signed symbolic expressions only
+ if (!SingleTy->isSignedIntegerOrEnumerationType())
+ return None;
+
SymbolRef RSym = Rhs.getAsSymbol();
if (!RSym || RSym->getType() != SingleTy)
return None;
@@ -534,7 +535,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
while (1) {
switch (lhs.getSubKind()) {
default:
- return makeSymExprValNN(state, op, lhs, rhs, resultTy);
+ return makeSymExprValNN(op, lhs, rhs, resultTy);
case nonloc::PointerToMemberKind: {
assert(rhs.getSubKind() == nonloc::PointerToMemberKind &&
"Both SVals should have pointer-to-member-type");
@@ -582,7 +583,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return makeTruthVal(true, resultTy);
default:
// This case also handles pointer arithmetic.
- return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
}
}
@@ -624,7 +625,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_LE:
case BO_GE:
op = BinaryOperator::reverseComparisonOp(op);
- // FALL-THROUGH
+ LLVM_FALLTHROUGH;
case BO_EQ:
case BO_NE:
case BO_Add:
@@ -638,14 +639,14 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// (~0)>>a
if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
return evalCastFromNonLoc(lhs, resultTy);
- // FALL-THROUGH
+ LLVM_FALLTHROUGH;
case BO_Shl:
// 0<<a and 0>>a
if (LHSValue == 0)
return evalCastFromNonLoc(lhs, resultTy);
- return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
default:
- return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
}
case nonloc::SymbolValKind: {
@@ -757,7 +758,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return *V;
// Give up -- this is not a symbolic expression we can handle.
- return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+ return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
}
}
@@ -1201,6 +1202,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
SVal V) {
+ V = simplifySVal(state, V);
if (V.isUnknownOrUndef())
return nullptr;
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index 94188a9ef698..4fa937d9658d 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -88,7 +88,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
return R;
// We don't know what to make of it. Return a NULL region, which
- // will be interpretted as UnknownVal.
+ // will be interpreted as UnknownVal.
return nullptr;
}
@@ -138,6 +138,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::VarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
+ case MemRegion::CXXDerivedObjectRegionKind:
return MakeElementRegion(cast<SubRegion>(R), PointeeTy);
case MemRegion::ElementRegionKind: {
@@ -272,9 +273,8 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
bool IsVirtual) {
- Optional<loc::MemRegionVal> DerivedRegVal =
- Derived.getAs<loc::MemRegionVal>();
- if (!DerivedRegVal)
+ const MemRegion *DerivedReg = Derived.getAsRegion();
+ if (!DerivedReg)
return Derived;
const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
@@ -282,8 +282,18 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
BaseDecl = BaseType->getAsCXXRecordDecl();
assert(BaseDecl && "not a C++ object?");
+ if (const auto *AlreadyDerivedReg =
+ dyn_cast<CXXDerivedObjectRegion>(DerivedReg)) {
+ if (const auto *SR =
+ dyn_cast<SymbolicRegion>(AlreadyDerivedReg->getSuperRegion()))
+ if (SR->getSymbol()->getType()->getPointeeCXXRecordDecl() == BaseDecl)
+ return loc::MemRegionVal(SR);
+
+ DerivedReg = AlreadyDerivedReg->getSuperRegion();
+ }
+
const MemRegion *BaseReg = MRMgr.getCXXBaseObjectRegion(
- BaseDecl, cast<SubRegion>(DerivedRegVal->getRegion()), IsVirtual);
+ BaseDecl, cast<SubRegion>(DerivedReg), IsVirtual);
return loc::MemRegionVal(BaseReg);
}
@@ -365,6 +375,20 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
MR = Uncasted;
}
+ // If we're casting a symbolic base pointer to a derived class, use
+ // CXXDerivedObjectRegion to represent the cast. If it's a pointer to an
+ // unrelated type, it must be a weird reinterpret_cast and we have to
+ // be fine with ElementRegion. TODO: Should we instead make
+ // Derived{TargetClass, Element{SourceClass, SR}}?
+ if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) {
+ QualType T = SR->getSymbol()->getType();
+ const CXXRecordDecl *SourceClass = T->getPointeeCXXRecordDecl();
+ if (TargetClass && SourceClass && TargetClass->isDerivedFrom(SourceClass))
+ return loc::MemRegionVal(
+ MRMgr.getCXXDerivedObjectRegion(TargetClass, SR));
+ return loc::MemRegionVal(GetElementZeroRegion(SR, TargetType));
+ }
+
// We failed if the region we ended up with has perfect type info.
Failed = isa<TypedValueRegion>(MR);
return UnknownVal();
@@ -378,6 +402,17 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
if (castTy.isNull() || V.isUnknownOrUndef())
return V;
+ // The dispatchCast() call below would convert the int into a float.
+ // What we want, however, is a bit-by-bit reinterpretation of the int
+ // as a float, which usually yields nothing but garbage. For now, skip casts
+ // from ints to floats.
+ // TODO: What other combinations of types are affected?
+ if (castTy->isFloatingType()) {
+ SymbolRef Sym = V.getAsSymbol();
+ if (Sym && !Sym->getType()->isFloatingType())
+ return UnknownVal();
+ }
+
// When retrieving symbolic pointer and expecting a non-void pointer,
// wrap them into element regions of the expected type if necessary.
// SValBuilder::dispatchCast() doesn't do that, but it is necessary to
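
An aside on the int-to-float guard added in CastRetrievedVal above: converting an integer's value to float and reinterpreting its bits as a float produce very different results, so modeling the cast as a value conversion would be wrong. A plain C++ illustration, independent of the analyzer:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      std::int32_t I = 1065353216;              // 0x3F800000, the bits of 1.0f
      float Converted = static_cast<float>(I);  // value conversion: ~1.07e9
      float Reinterpreted;
      std::memcpy(&Reinterpreted, &I, sizeof I); // bit reinterpretation: 1.0
      std::printf("converted: %g, reinterpreted: %g\n", Converted,
                  Reinterpreted);
    }
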
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index ed197010ebb7..66273f099a38 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -83,7 +83,13 @@ void SymbolCast::dumpToStream(raw_ostream &os) const {
}
void SymbolConjured::dumpToStream(raw_ostream &os) const {
- os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << ", LC"
+ << LCtx->getID();
+ if (S)
+ os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
+ else
+ os << ", no stmt";
+ os << ", #" << Count << '}';
}
void SymbolDerived::dumpToStream(raw_ostream &os) const {
@@ -395,7 +401,6 @@ void SymbolReaper::markDependentsLive(SymbolRef sym) {
void SymbolReaper::markLive(SymbolRef sym) {
TheLiving[sym] = NotProcessed;
- TheDead.erase(sym);
markDependentsLive(sym);
}
@@ -420,14 +425,6 @@ void SymbolReaper::markInUse(SymbolRef sym) {
MetadataInUse.insert(sym);
}
-bool SymbolReaper::maybeDead(SymbolRef sym) {
- if (isLive(sym))
- return false;
-
- TheDead.insert(sym);
- return true;
-}
-
bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
if (RegionRoots.count(MR))
return true;
diff --git a/lib/StaticAnalyzer/Core/TaintManager.cpp b/lib/StaticAnalyzer/Core/TaintManager.cpp
new file mode 100644
index 000000000000..c34b0ca1839d
--- /dev/null
+++ b/lib/StaticAnalyzer/Core/TaintManager.cpp
@@ -0,0 +1,23 @@
+//== TaintManager.cpp ------------------------------------------ -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+
+using namespace clang;
+using namespace ento;
+
+void *ProgramStateTrait<TaintMap>::GDMIndex() {
+ static int index = 0;
+ return &index;
+}
+
+void *ProgramStateTrait<DerivedSymTaint>::GDMIndex() {
+ static int index;
+ return &index;
+}
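
The two GDMIndex() definitions above use a common idiom: the address of a distinct function-local static serves as a unique, stable key for each trait in the generic data map. A standalone sketch of the idiom (illustrative names, not analyzer API):

    #include <cassert>
    #include <map>

    // Each trait's key is simply the address of its own local static;
    // distinct statics are guaranteed to have distinct addresses.
    static void *taintMapIndex()        { static int index = 0; return &index; }
    static void *derivedSymTaintIndex() { static int index = 0; return &index; }

    int main() {
      std::map<void *, const char *> GDM;
      GDM[taintMapIndex()] = "taint map data";
      GDM[derivedSymTaintIndex()] = "derived-symbol taint data";
      assert(taintMapIndex() != derivedSymTaintIndex());
      assert(GDM.size() == 2);
    }
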
diff --git a/lib/StaticAnalyzer/Core/WorkList.cpp b/lib/StaticAnalyzer/Core/WorkList.cpp
index 4b227375da9b..e705393cb83a 100644
--- a/lib/StaticAnalyzer/Core/WorkList.cpp
+++ b/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -152,7 +152,7 @@ public:
auto BE = N->getLocation().getAs<BlockEntrance>();
if (!BE) {
- // Assume the choice of the order of the preceeding block entrance was
+ // Assume the choice of the order of the preceding block entrance was
// correct.
StackUnexplored.push_back(U);
} else {
@@ -252,3 +252,63 @@ public:
std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityQueue() {
return llvm::make_unique<UnexploredFirstPriorityQueue>();
}
+
+namespace {
+class UnexploredFirstPriorityLocationQueue : public WorkList {
+ using LocIdentifier = const CFGBlock *;
+
+ // How many times each location was visited.
+ // Is signed because we negate it later in order to have a reversed
+ // comparison.
+ using VisitedTimesMap = llvm::DenseMap<LocIdentifier, int>;
+
+ // Compare by number of times the location was visited first (negated
+ // to prefer less often visited locations), then by insertion time (prefer
+ // expanding the most recently inserted nodes first, which emulates DFS).
+ using QueuePriority = std::pair<int, unsigned long>;
+ using QueueItem = std::pair<WorkListUnit, QueuePriority>;
+
+ struct ExplorationComparator {
+ bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
+ return LHS.second < RHS.second;
+ }
+ };
+
+ // Number of inserted nodes, used to emulate DFS ordering in the priority
+ // queue when visit counts are equal.
+ unsigned long Counter = 0;
+
+ // Number of times each location has been reached so far.
+ VisitedTimesMap NumReached;
+
+ // The top item is the largest one.
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ queue;
+
+public:
+ bool hasWork() const override {
+ return !queue.empty();
+ }
+
+ void enqueue(const WorkListUnit &U) override {
+ const ExplodedNode *N = U.getNode();
+ unsigned NumVisited = 0;
+ if (auto BE = N->getLocation().getAs<BlockEntrance>())
+ NumVisited = NumReached[BE->getBlock()]++;
+
+ queue.push(std::make_pair(U, std::make_pair(-NumVisited, ++Counter)));
+ }
+
+ WorkListUnit dequeue() override {
+ QueueItem U = queue.top();
+ queue.pop();
+ return U.first;
+ }
+
+};
+
+}
+
+std::unique_ptr<WorkList> WorkList::makeUnexploredFirstPriorityLocationQueue() {
+ return llvm::make_unique<UnexploredFirstPriorityLocationQueue>();
+}
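
A standard-C++ sketch of the ordering used by UnexploredFirstPriorityLocationQueue above (not the analyzer's WorkList interface): priorities of the form (negated visit count, insertion counter) are compared lexicographically in a max-heap, so the least-visited block is dequeued first and, among equally visited blocks, the most recently enqueued unit.

    #include <cstdio>
    #include <queue>
    #include <utility>
    #include <vector>

    int main() {
      using Priority = std::pair<int, unsigned long>; // (-visits, counter)
      using Item = std::pair<const char *, Priority>;
      auto Cmp = [](const Item &L, const Item &R) { return L.second < R.second; };
      std::priority_queue<Item, std::vector<Item>, decltype(Cmp)> Q(Cmp);

      Q.push({"A: visited twice, enqueued first", {-2, 1}});
      Q.push({"B: never visited, enqueued second", {0, 2}});
      Q.push({"C: never visited, enqueued third", {0, 3}});

      while (!Q.empty()) { // prints C, then B, then A
        std::printf("%s\n", Q.top().first);
        Q.pop();
      }
    }
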
diff --git a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
index 7379ded49c80..c4729f969f33 100644
--- a/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp
@@ -11,10 +11,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTExpr.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTSolver.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SMTSort.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
#include "clang/Config/config.h"
@@ -49,15 +46,15 @@ public:
// Function used to report errors
void Z3ErrorHandler(Z3_context Context, Z3_error_code Error) {
llvm::report_fatal_error("Z3 error: " +
- llvm::Twine(Z3_get_error_msg_ex(Context, Error)));
+ llvm::Twine(Z3_get_error_msg(Context, Error)));
}
/// Wrapper for Z3 context
-class Z3Context : public SMTContext {
+class Z3Context {
public:
Z3_context Context;
- Z3Context() : SMTContext() {
+ Z3Context() {
Context = Z3_mk_context_rc(Z3Config().Config);
// The error function is set here because the context is the first object
// created by the backend
@@ -80,32 +77,27 @@ class Z3Sort : public SMTSort {
public:
/// Default constructor, mainly used by make_shared
- Z3Sort(Z3Context &C, Z3_sort ZS) : SMTSort(), Context(C), Sort(ZS) {
+ Z3Sort(Z3Context &C, Z3_sort ZS) : Context(C), Sort(ZS) {
Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
/// Override implicit copy constructor for correct reference counting.
- Z3Sort(const Z3Sort &Copy)
- : SMTSort(), Context(Copy.Context), Sort(Copy.Sort) {
+ Z3Sort(const Z3Sort &Other) : Context(Other.Context), Sort(Other.Sort) {
Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
}
- /// Provide move constructor
- Z3Sort(Z3Sort &&Move) : SMTSort(), Context(Move.Context), Sort(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Sort &operator=(Z3Sort &&Move) {
- if (this != &Move) {
- if (Sort)
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- Sort = Move.Sort;
- Move.Sort = nullptr;
- }
+ /// Override implicit copy assignment constructor for correct reference
+ /// counting.
+ Z3Sort &operator=(const Z3Sort &Other) {
+ Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Other.Sort));
+ Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
+ Sort = Other.Sort;
return *this;
}
+ Z3Sort(Z3Sort &&Other) = delete;
+ Z3Sort &operator=(Z3Sort &&Other) = delete;
+
~Z3Sort() {
if (Sort)
Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
@@ -137,13 +129,6 @@ public:
static_cast<const Z3Sort &>(Other).Sort);
}
- Z3Sort &operator=(const Z3Sort &Move) {
- Z3_inc_ref(Context.Context, reinterpret_cast<Z3_ast>(Move.Sort));
- Z3_dec_ref(Context.Context, reinterpret_cast<Z3_ast>(Sort));
- Sort = Move.Sort;
- return *this;
- }
-
void print(raw_ostream &OS) const override {
OS << Z3_sort_to_string(Context.Context, Sort);
}
@@ -170,22 +155,18 @@ public:
Z3_inc_ref(Context.Context, AST);
}
- /// Provide move constructor
- Z3Expr(Z3Expr &&Move) : SMTExpr(), Context(Move.Context), AST(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Expr &operator=(Z3Expr &&Move) {
- if (this != &Move) {
- if (AST)
- Z3_dec_ref(Context.Context, AST);
- AST = Move.AST;
- Move.AST = nullptr;
- }
+ /// Override implicit copy assignment constructor for correct reference
+ /// counting.
+ Z3Expr &operator=(const Z3Expr &Other) {
+ Z3_inc_ref(Context.Context, Other.AST);
+ Z3_dec_ref(Context.Context, AST);
+ AST = Other.AST;
return *this;
}
+ Z3Expr(Z3Expr &&Other) = delete;
+ Z3Expr &operator=(Z3Expr &&Other) = delete;
+
~Z3Expr() {
if (AST)
Z3_dec_ref(Context.Context, AST);
@@ -205,14 +186,6 @@ public:
static_cast<const Z3Expr &>(Other).AST);
}
- /// Override implicit move constructor for correct reference counting.
- Z3Expr &operator=(const Z3Expr &Move) {
- Z3_inc_ref(Context.Context, Move.AST);
- Z3_dec_ref(Context.Context, AST);
- AST = Move.AST;
- return *this;
- }
-
void print(raw_ostream &OS) const override {
OS << Z3_ast_to_string(Context.Context, AST);
}
@@ -231,30 +204,13 @@ class Z3Model {
public:
Z3Model(Z3Context &C, Z3_model ZM) : Context(C), Model(ZM) {
- assert(C.Context != nullptr);
Z3_model_inc_ref(Context.Context, Model);
}
- /// Override implicit copy constructor for correct reference counting.
- Z3Model(const Z3Model &Copy) : Context(Copy.Context), Model(Copy.Model) {
- Z3_model_inc_ref(Context.Context, Model);
- }
-
- /// Provide move constructor
- Z3Model(Z3Model &&Move) : Context(Move.Context), Model(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Model &operator=(Z3Model &&Move) {
- if (this != &Move) {
- if (Model)
- Z3_model_dec_ref(Context.Context, Model);
- Model = Move.Model;
- Move.Model = nullptr;
- }
- return *this;
- }
+ Z3Model(const Z3Model &Other) = delete;
+ Z3Model(Z3Model &&Other) = delete;
+ Z3Model &operator=(Z3Model &Other) = delete;
+ Z3Model &operator=(Z3Model &&Other) = delete;
~Z3Model() {
if (Model)
@@ -313,32 +269,14 @@ class Z3Solver : public SMTSolver {
Z3_solver Solver;
public:
- Z3Solver() : SMTSolver(), Solver(Z3_mk_simple_solver(Context.Context)) {
- Z3_solver_inc_ref(Context.Context, Solver);
- }
-
- /// Override implicit copy constructor for correct reference counting.
- Z3Solver(const Z3Solver &Copy)
- : SMTSolver(), Context(Copy.Context), Solver(Copy.Solver) {
+ Z3Solver() : Solver(Z3_mk_simple_solver(Context.Context)) {
Z3_solver_inc_ref(Context.Context, Solver);
}
- /// Provide move constructor
- Z3Solver(Z3Solver &&Move)
- : SMTSolver(), Context(Move.Context), Solver(nullptr) {
- *this = std::move(Move);
- }
-
- /// Provide move assignment constructor
- Z3Solver &operator=(Z3Solver &&Move) {
- if (this != &Move) {
- if (Solver)
- Z3_solver_dec_ref(Context.Context, Solver);
- Solver = Move.Solver;
- Move.Solver = nullptr;
- }
- return *this;
- }
+ Z3Solver(const Z3Solver &Other) = delete;
+ Z3Solver(Z3Solver &&Other) = delete;
+ Z3Solver &operator=(Z3Solver &Other) = delete;
+ Z3Solver &operator=(Z3Solver &&Other) = delete;
~Z3Solver() {
if (Solver)
@@ -674,7 +612,7 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkFPtoSBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef mkSBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context,
@@ -682,7 +620,7 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkFPtoUBV(const SMTExprRef &From, const SMTSortRef &To) override {
+ SMTExprRef mkUBVtoFP(const SMTExprRef &From, const SMTSortRef &To) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context,
@@ -690,14 +628,14 @@ public:
toZ3Expr(*From).AST, toZ3Sort(*To).Sort)));
}
- SMTExprRef mkSBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context, Z3_mk_fpa_to_sbv(Context.Context, toZ3Expr(*RoundingMode).AST,
toZ3Expr(*From).AST, ToWidth)));
}
- SMTExprRef mkUBVtoFP(const SMTExprRef &From, unsigned ToWidth) override {
+ SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) override {
SMTExprRef RoundingMode = getFloatRoundingMode();
return newExprRef(Z3Expr(
Context, Z3_mk_fpa_to_ubv(Context.Context, toZ3Expr(*RoundingMode).AST,
@@ -736,9 +674,11 @@ public:
llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
bool isUnsigned) override {
- return llvm::APSInt(llvm::APInt(
- BitWidth, Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
- 10));
+ return llvm::APSInt(
+ llvm::APInt(BitWidth,
+ Z3_get_numeral_string(Context.Context, toZ3Expr(*Exp).AST),
+ 10),
+ isUnsigned);
}
bool getBoolean(const SMTExprRef &Exp) override {
@@ -750,42 +690,6 @@ public:
return newExprRef(Z3Expr(Context, Z3_mk_fpa_rne(Context.Context)));
}
- SMTExprRef fromData(const SymbolID ID, const QualType &Ty,
- uint64_t BitWidth) override {
- llvm::Twine Name = "$" + llvm::Twine(ID);
- return mkSymbol(Name.str().c_str(), mkSort(Ty, BitWidth));
- }
-
- SMTExprRef fromBoolean(const bool Bool) override {
- Z3_ast AST =
- Bool ? Z3_mk_true(Context.Context) : Z3_mk_false(Context.Context);
- return newExprRef(Z3Expr(Context, AST));
- }
-
- SMTExprRef fromAPFloat(const llvm::APFloat &Float) override {
- SMTSortRef Sort =
- getFloatSort(llvm::APFloat::semanticsSizeInBits(Float.getSemantics()));
-
- llvm::APSInt Int = llvm::APSInt(Float.bitcastToAPInt(), false);
- SMTExprRef Z3Int = fromAPSInt(Int);
- return newExprRef(Z3Expr(
- Context, Z3_mk_fpa_to_fp_bv(Context.Context, toZ3Expr(*Z3Int).AST,
- toZ3Sort(*Sort).Sort)));
- }
-
- SMTExprRef fromAPSInt(const llvm::APSInt &Int) override {
- SMTSortRef Sort = getBitvectorSort(Int.getBitWidth());
- Z3_ast AST = Z3_mk_numeral(Context.Context, Int.toString(10).c_str(),
- toZ3Sort(*Sort).Sort);
- return newExprRef(Z3Expr(Context, AST));
- }
-
- SMTExprRef fromInt(const char *Int, uint64_t BitWidth) override {
- SMTSortRef Sort = getBitvectorSort(BitWidth);
- Z3_ast AST = Z3_mk_numeral(Context.Context, Int, toZ3Sort(*Sort).Sort);
- return newExprRef(Z3Expr(Context, AST));
- }
-
bool toAPFloat(const SMTSortRef &Sort, const SMTExprRef &AST,
llvm::APFloat &Float, bool useSemantics) {
assert(Sort->isFloatSort() && "Unsupported sort to floating-point!");
@@ -846,7 +750,7 @@ public:
}
bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) override {
- Z3Model Model = getModel();
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
Z3_func_decl Func = Z3_get_app_decl(
Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
@@ -860,7 +764,7 @@ public:
}
bool getInterpretation(const SMTExprRef &Exp, llvm::APFloat &Float) override {
- Z3Model Model = getModel();
+ Z3Model Model(Context, Z3_solver_get_model(Context.Context, Solver));
Z3_func_decl Func = Z3_get_app_decl(
Context.Context, Z3_to_app(Context.Context, toZ3Expr(*Exp).AST));
if (Z3_model_has_interp(Context.Context, Model.Model, Func) != Z3_L_TRUE)
@@ -873,7 +777,7 @@ public:
return toAPFloat(Sort, Assign, Float, true);
}
- ConditionTruthVal check() const override {
+ Optional<bool> check() const override {
Z3_lbool res = Z3_solver_check(Context.Context, Solver);
if (res == Z3_L_TRUE)
return true;
@@ -881,7 +785,7 @@ public:
if (res == Z3_L_FALSE)
return false;
- return ConditionTruthVal();
+ return Optional<bool>();
}
void push() override { return Z3_solver_push(Context.Context, Solver); }
@@ -891,138 +795,34 @@ public:
return Z3_solver_pop(Context.Context, Solver, NumStates);
}
- /// Get a model from the solver. Caller should check the model is
- /// satisfiable.
- Z3Model getModel() {
- return Z3Model(Context, Z3_solver_get_model(Context.Context, Solver));
- }
+ bool isFPSupported() override { return true; }
/// Reset the solver and remove all constraints.
- void reset() const override { Z3_solver_reset(Context.Context, Solver); }
+ void reset() override { Z3_solver_reset(Context.Context, Solver); }
void print(raw_ostream &OS) const override {
OS << Z3_solver_to_string(Context.Context, Solver);
}
}; // end class Z3Solver
-class Z3ConstraintManager : public SMTConstraintManager {
+class Z3ConstraintManager : public SMTConstraintManager<ConstraintZ3, Z3Expr> {
SMTSolverRef Solver = CreateZ3Solver();
public:
Z3ConstraintManager(SubEngine *SE, SValBuilder &SB)
: SMTConstraintManager(SE, SB, Solver) {}
-
- void addStateConstraints(ProgramStateRef State) const override {
- // TODO: Don't add all the constraints, only the relevant ones
- ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
- ConstraintZ3Ty::iterator I = CZ.begin(), IE = CZ.end();
-
- // Construct the logical AND of all the constraints
- if (I != IE) {
- std::vector<SMTExprRef> ASTs;
-
- SMTExprRef Constraint = Solver->newExprRef(I++->second);
- while (I != IE) {
- Constraint = Solver->mkAnd(Constraint, Solver->newExprRef(I++->second));
- }
-
- Solver->addConstraint(Constraint);
- }
- }
-
- bool canReasonAbout(SVal X) const override {
- const TargetInfo &TI = getBasicVals().getContext().getTargetInfo();
-
- Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
- if (!SymVal)
- return true;
-
- const SymExpr *Sym = SymVal->getSymbol();
- QualType Ty = Sym->getType();
-
- // Complex types are not modeled
- if (Ty->isComplexType() || Ty->isComplexIntegerType())
- return false;
-
- // Non-IEEE 754 floating-point types are not modeled
- if ((Ty->isSpecificBuiltinType(BuiltinType::LongDouble) &&
- (&TI.getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended() ||
- &TI.getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())))
- return false;
-
- if (isa<SymbolData>(Sym))
- return true;
-
- SValBuilder &SVB = getSValBuilder();
-
- if (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
- return canReasonAbout(SVB.makeSymbolVal(SC->getOperand()));
-
- if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
- if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(SIE->getLHS()));
-
- if (const IntSymExpr *ISE = dyn_cast<IntSymExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(ISE->getRHS()));
-
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(BSE))
- return canReasonAbout(SVB.makeSymbolVal(SSE->getLHS())) &&
- canReasonAbout(SVB.makeSymbolVal(SSE->getRHS()));
- }
-
- llvm_unreachable("Unsupported expression to reason about!");
- }
-
- ProgramStateRef removeDeadBindings(ProgramStateRef State,
- SymbolReaper &SymReaper) override {
- ConstraintZ3Ty CZ = State->get<ConstraintZ3>();
- ConstraintZ3Ty::Factory &CZFactory = State->get_context<ConstraintZ3>();
-
- for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- if (SymReaper.maybeDead(I->first))
- CZ = CZFactory.remove(CZ, *I);
- }
-
- return State->set<ConstraintZ3>(CZ);
- }
-
- ProgramStateRef assumeExpr(ProgramStateRef State, SymbolRef Sym,
- const SMTExprRef &Exp) override {
- // Check the model, avoid simplifying AST to save time
- if (checkModel(State, Exp).isConstrainedTrue())
- return State->add<ConstraintZ3>(std::make_pair(Sym, toZ3Expr(*Exp)));
-
- return nullptr;
- }
-
- //==------------------------------------------------------------------------==/
- // Pretty-printing.
- //==------------------------------------------------------------------------==/
-
- void print(ProgramStateRef St, raw_ostream &OS, const char *nl,
- const char *sep) override {
-
- ConstraintZ3Ty CZ = St->get<ConstraintZ3>();
-
- OS << nl << sep << "Constraints:";
- for (ConstraintZ3Ty::iterator I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- OS << nl << ' ' << I->first << " : ";
- I->second.print(OS);
- }
- OS << nl;
- }
}; // end class Z3ConstraintManager
} // end anonymous namespace
#endif
-std::unique_ptr<SMTSolver> clang::ento::CreateZ3Solver() {
+SMTSolverRef clang::ento::CreateZ3Solver() {
#if CLANG_ANALYZER_WITH_Z3
return llvm::make_unique<Z3Solver>();
#else
llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
false);
return nullptr;
#endif
@@ -1034,7 +834,7 @@ ento::CreateZ3ConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
return llvm::make_unique<Z3ConstraintManager>(Eng, StMgr.getSValBuilder());
#else
llvm::report_fatal_error("Clang was not compiled with Z3 support, rebuild "
- "with -DCLANG_ANALYZER_BUILD_Z3=ON",
+ "with -DCLANG_ANALYZER_ENABLE_Z3_SOLVER=ON",
false);
return nullptr;
#endif
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 44abde5da6d1..d87937d9b63d 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -50,8 +50,6 @@ using namespace ento;
#define DEBUG_TYPE "AnalysisConsumer"
-static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz();
-
STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
STATISTIC(NumFunctionsAnalyzed,
"The # of functions and blocks analyzed (as top level "
@@ -206,7 +204,7 @@ public:
PP(CI.getPreprocessor()), OutDir(outdir), Opts(std::move(opts)),
Plugins(plugins), Injector(injector), CTU(CI) {
DigestAnalyzerOptions();
- if (Opts->PrintStats || Opts->shouldSerializeStats()) {
+ if (Opts->PrintStats || Opts->ShouldSerializeStats) {
AnalyzerTimers = llvm::make_unique<llvm::TimerGroup>(
"analyzer", "Analyzer timers");
TUTotalTimer = llvm::make_unique<llvm::Timer>(
@@ -295,13 +293,12 @@ public:
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr =
- createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
- CheckerRegistrationFns, PP.getDiagnostics());
+ checkerMgr = createCheckerManager(
+ *Ctx, *Opts, Plugins, CheckerRegistrationFns, PP.getDiagnostics());
Mgr = llvm::make_unique<AnalysisManager>(
- *Ctx, PP.getDiagnostics(), PP.getLangOpts(), PathConsumers,
- CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
+ *Ctx, PP.getDiagnostics(), PathConsumers, CreateStoreMgr,
+ CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
}
/// Store the top level decls in the set to be processed later on.
@@ -334,9 +331,6 @@ public:
void RunPathSensitiveChecks(Decl *D,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees);
- void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
- ExprEngine::InliningModes IMode,
- SetOfConstDecls *VisitedCallees);
/// Visitors for the RecursiveASTVisitor.
bool shouldWalkTypesOfTypeLocs() const { return false; }
@@ -682,7 +676,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
// - System headers: don't run any checks.
SourceManager &SM = Ctx->getSourceManager();
const Stmt *Body = D->getBody();
- SourceLocation SL = Body ? Body->getLocStart() : D->getLocation();
+ SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation();
SL = SM.getExpansionLoc(SL);
if (!Opts->AnalyzeAll && !Mgr->isInCodeFile(SL)) {
@@ -729,9 +723,9 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
// Path-sensitive checking.
//===----------------------------------------------------------------------===//
-void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
- ExprEngine::InliningModes IMode,
- SetOfConstDecls *VisitedCallees) {
+void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
+ ExprEngine::InliningModes IMode,
+ SetOfConstDecls *VisitedCallees) {
// Construct the analysis engine. First check if the CFG is valid.
// FIXME: Inter-procedural analysis will need to handle invalid CFGs.
if (!Mgr->getCFG(D))
@@ -741,23 +735,14 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
return;
- ExprEngine Eng(CTU, *Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,
- IMode);
-
- // Set the graph auditor.
- std::unique_ptr<ExplodedNode::Auditor> Auditor;
- if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
- Auditor = CreateUbiViz();
- ExplodedNode::SetAuditor(Auditor.get());
- }
+ ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
// Execute the worklist algorithm.
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
- Mgr->options.getMaxNodesPerTopLevelFunction());
+ Mgr->options.MaxNodesPerTopLevelFunction);
- // Release the auditor (if any) so that it doesn't monitor the graph
- // created BugReporter.
- ExplodedNode::SetAuditor(nullptr);
+ if (!Mgr->options.DumpExplodedGraphTo.empty())
+ Eng.DumpGraph(Mgr->options.TrimGraph, Mgr->options.DumpExplodedGraphTo);
// Visualize the exploded graph.
if (Mgr->options.visualizeExplodedGraphWithGraphViz)
@@ -767,26 +752,6 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
Eng.getBugReporter().FlushReports();
}
-void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
- ExprEngine::InliningModes IMode,
- SetOfConstDecls *Visited) {
-
- switch (Mgr->getLangOpts().getGC()) {
- case LangOptions::NonGC:
- ActionExprEngine(D, false, IMode, Visited);
- break;
-
- case LangOptions::GCOnly:
- ActionExprEngine(D, true, IMode, Visited);
- break;
-
- case LangOptions::HybridGC:
- ActionExprEngine(D, false, IMode, Visited);
- ActionExprEngine(D, true, IMode, Visited);
- break;
- }
-}
-
//===----------------------------------------------------------------------===//
// AnalysisConsumer creation.
//===----------------------------------------------------------------------===//
@@ -804,98 +769,3 @@ ento::CreateAnalysisConsumer(CompilerInstance &CI) {
CI.getFrontendOpts().Plugins,
hasModelPath ? new ModelInjector(CI) : nullptr);
}
-
-//===----------------------------------------------------------------------===//
-// Ubigraph Visualization. FIXME: Move to separate file.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class UbigraphViz : public ExplodedNode::Auditor {
- std::unique_ptr<raw_ostream> Out;
- std::string Filename;
- unsigned Cntr;
-
- typedef llvm::DenseMap<void*,unsigned> VMap;
- VMap M;
-
-public:
- UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename);
-
- ~UbigraphViz() override;
-
- void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) override;
-};
-
-} // end anonymous namespace
-
-static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz() {
- SmallString<128> P;
- int FD;
- llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P);
- llvm::errs() << "Writing '" << P << "'.\n";
-
- auto Stream = llvm::make_unique<llvm::raw_fd_ostream>(FD, true);
-
- return llvm::make_unique<UbigraphViz>(std::move(Stream), P);
-}
-
-void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
-
- assert (Src != Dst && "Self-edges are not allowed.");
-
- // Lookup the Src. If it is a new node, it's a root.
- VMap::iterator SrcI= M.find(Src);
- unsigned SrcID;
-
- if (SrcI == M.end()) {
- M[Src] = SrcID = Cntr++;
- *Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
- }
- else
- SrcID = SrcI->second;
-
- // Lookup the Dst.
- VMap::iterator DstI= M.find(Dst);
- unsigned DstID;
-
- if (DstI == M.end()) {
- M[Dst] = DstID = Cntr++;
- *Out << "('vertex', " << DstID << ")\n";
- }
- else {
- // We have hit DstID before. Change its style to reflect a cache hit.
- DstID = DstI->second;
- *Out << "('change_vertex_style', " << DstID << ", 1)\n";
- }
-
- // Add the edge.
- *Out << "('edge', " << SrcID << ", " << DstID
- << ", ('arrow','true'), ('oriented', 'true'))\n";
-}
-
-UbigraphViz::UbigraphViz(std::unique_ptr<raw_ostream> OutStream,
- StringRef Filename)
- : Out(std::move(OutStream)), Filename(Filename), Cntr(0) {
-
- *Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
- *Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
- " ('size', '1.5'))\n";
-}
-
-UbigraphViz::~UbigraphViz() {
- Out.reset();
- llvm::errs() << "Running 'ubiviz' program... ";
- std::string ErrMsg;
- std::string Ubiviz;
- if (auto Path = llvm::sys::findProgramByName("ubiviz"))
- Ubiviz = *Path;
- std::array<StringRef, 2> Args{{Ubiviz, Filename}};
-
- if (llvm::sys::ExecuteAndWait(Ubiviz, Args, llvm::None, {}, 0, 0, &ErrMsg)) {
- llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
- }
-
- // Delete the file.
- llvm::sys::fs::remove(Filename);
-}
diff --git a/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
index ff0a6e19fc97..5e7dd8f18cd7 100644
--- a/lib/StaticAnalyzer/Frontend/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangStaticAnalyzerFrontend
AnalysisConsumer.cpp
CheckerRegistration.cpp
+ CheckerRegistry.cpp
FrontendActions.cpp
ModelConsumer.cpp
ModelInjector.cpp
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index a260c2d85b11..1c31c35b75e4 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -14,146 +14,124 @@
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
-#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Support/Path.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
using namespace clang;
using namespace ento;
-using llvm::sys::DynamicLibrary;
-
-namespace {
-class ClangCheckerRegistry : public CheckerRegistry {
- typedef void (*RegisterCheckersFn)(CheckerRegistry &);
-
- static bool isCompatibleAPIVersion(const char *versionString);
- static void warnIncompatible(DiagnosticsEngine *diags, StringRef pluginPath,
- const char *pluginAPIVersion);
-
-public:
- ClangCheckerRegistry(ArrayRef<std::string> plugins,
- DiagnosticsEngine *diags = nullptr);
-};
-
-} // end anonymous namespace
-
-ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
- DiagnosticsEngine *diags) {
- registerBuiltinCheckers(*this);
-
- for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
- i != e; ++i) {
- // Get access to the plugin.
- std::string err;
- DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str(), &err);
- if (!lib.isValid()) {
- diags->Report(diag::err_fe_unable_to_load_plugin) << *i << err;
- continue;
- }
-
- // See if it's compatible with this build of clang.
- const char *pluginAPIVersion =
- (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
- if (!isCompatibleAPIVersion(pluginAPIVersion)) {
- warnIncompatible(diags, *i, pluginAPIVersion);
- continue;
- }
-
- // Register its checkers.
- RegisterCheckersFn registerPluginCheckers =
- (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
- "clang_registerCheckers");
- if (registerPluginCheckers)
- registerPluginCheckers(*this);
- }
-}
-
-bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
- // If the version string is null, it's not an analyzer plugin.
- if (!versionString)
- return false;
-
- // For now, none of the static analyzer API is considered stable.
- // Versions must match exactly.
- return strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
-}
-
-void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
- StringRef pluginPath,
- const char *pluginAPIVersion) {
- if (!diags)
- return;
- if (!pluginAPIVersion)
- return;
-
- diags->Report(diag::warn_incompatible_analyzer_plugin_api)
- << llvm::sys::path::filename(pluginPath);
- diags->Report(diag::note_incompatible_analyzer_plugin_api)
- << CLANG_ANALYZER_API_VERSION_STRING
- << pluginAPIVersion;
-}
-
-static SmallVector<CheckerOptInfo, 8>
-getCheckerOptList(const AnalyzerOptions &opts) {
- SmallVector<CheckerOptInfo, 8> checkerOpts;
- for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
- const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
- checkerOpts.push_back(CheckerOptInfo(opt.first, opt.second));
- }
- return checkerOpts;
-}
std::unique_ptr<CheckerManager> ento::createCheckerManager(
- AnalyzerOptions &opts, const LangOptions &langOpts,
+ ASTContext &context,
+ AnalyzerOptions &opts,
ArrayRef<std::string> plugins,
ArrayRef<std::function<void(CheckerRegistry &)>> checkerRegistrationFns,
DiagnosticsEngine &diags) {
- std::unique_ptr<CheckerManager> checkerMgr(
- new CheckerManager(langOpts, opts));
-
- SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
+ auto checkerMgr = llvm::make_unique<CheckerManager>(context, opts);
- ClangCheckerRegistry allCheckers(plugins, &diags);
+ CheckerRegistry allCheckers(plugins, diags);
for (const auto &Fn : checkerRegistrationFns)
Fn(allCheckers);
- allCheckers.initializeManager(*checkerMgr, checkerOpts);
- allCheckers.validateCheckerOptions(opts, diags);
+ allCheckers.initializeManager(*checkerMgr, opts);
+ allCheckers.validateCheckerOptions(opts);
checkerMgr->finishedCheckerRegistration();
- for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
- if (checkerOpts[i].isUnclaimed()) {
- diags.Report(diag::err_unknown_analyzer_checker)
- << checkerOpts[i].getName();
- diags.Report(diag::note_suggest_disabling_all_checkers);
- }
-
- }
-
return checkerMgr;
}
-void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
+void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags) {
out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
- ClangCheckerRegistry(plugins).printHelp(out);
+ CheckerRegistry(plugins, diags).printHelp(out);
}
void ento::printEnabledCheckerList(raw_ostream &out,
ArrayRef<std::string> plugins,
- const AnalyzerOptions &opts) {
+ const AnalyzerOptions &opts,
+ DiagnosticsEngine &diags) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
- SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
- ClangCheckerRegistry(plugins).printList(out, checkerOpts);
+ CheckerRegistry(plugins, diags).printList(out, opts);
+}
+
+void ento::printAnalyzerConfigList(raw_ostream &out) {
+ out << "OVERVIEW: Clang Static Analyzer -analyzer-config Option List\n\n";
+ out << "USAGE: clang -cc1 [CLANG_OPTIONS] -analyzer-config "
+ "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ out << " clang -cc1 [CLANG_OPTIONS] -analyzer-config OPTION1=VALUE, "
+ "-analyzer-config OPTION2=VALUE, ...\n\n";
+ out << " clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang"
+ "<OPTION1=VALUE,OPTION2=VALUE,...>\n\n";
+ out << " clang [CLANG_OPTIONS] -Xclang -analyzer-config -Xclang "
+ "OPTION1=VALUE, -Xclang -analyzer-config -Xclang "
+ "OPTION2=VALUE, ...\n\n";
+ out << "OPTIONS:\n\n";
+
+ using OptionAndDescriptionTy = std::pair<StringRef, std::string>;
+ OptionAndDescriptionTy PrintableOptions[] = {
+#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
+ { \
+ CMDFLAG, \
+ llvm::Twine(llvm::Twine() + "(" + \
+ (StringRef(#TYPE) == "StringRef" ? "string" : #TYPE ) + \
+ ") " DESC \
+ " (default: " #DEFAULT_VAL ")").str() \
+ },
+
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ { \
+ CMDFLAG, \
+ llvm::Twine(llvm::Twine() + "(" + \
+ (StringRef(#TYPE) == "StringRef" ? "string" : #TYPE ) + \
+ ") " DESC \
+ " (default: " #SHALLOW_VAL " in shallow mode, " #DEEP_VAL \
+ " in deep mode)").str() \
+ },
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+#undef ANALYZER_OPTION
+#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
+ };
+
+ llvm::sort(PrintableOptions, [](const OptionAndDescriptionTy &LHS,
+ const OptionAndDescriptionTy &RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ constexpr size_t MinLineWidth = 70;
+ constexpr size_t PadForOpt = 2;
+ constexpr size_t OptionWidth = 30;
+ constexpr size_t PadForDesc = PadForOpt + OptionWidth;
+ static_assert(MinLineWidth > PadForDesc, "MinLineWidth must be greater!");
+
+ llvm::formatted_raw_ostream FOut(out);
+
+ for (const auto &Pair : PrintableOptions) {
+ FOut.PadToColumn(PadForOpt) << Pair.first;
+
+ // If the buffer's length is greater than PadForDesc, print a newline.
+ if (FOut.getColumn() > PadForDesc)
+ FOut << '\n';
+
+ FOut.PadToColumn(PadForDesc);
+
+ for (char C : Pair.second) {
+ if (FOut.getColumn() > MinLineWidth && C == ' ') {
+ FOut << '\n';
+ FOut.PadToColumn(PadForDesc);
+ continue;
+ }
+ FOut << C;
+ }
+ FOut << "\n\n";
+ }
}
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
new file mode 100644
index 000000000000..620c0e588906
--- /dev/null
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -0,0 +1,247 @@
+//===- CheckerRegistry.cpp - Maintains all available checkers -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace ento;
+using llvm::sys::DynamicLibrary;
+
+using RegisterCheckersFn = void (*)(CheckerRegistry &);
+
+static bool isCompatibleAPIVersion(const char *versionString) {
+ // If the version string is null, it's not an analyzer plugin.
+ if (!versionString)
+ return false;
+
+ // For now, none of the static analyzer API is considered stable.
+ // Versions must match exactly.
+ return strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0;
+}
+
+CheckerRegistry::CheckerRegistry(ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags) : Diags(diags) {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI) \
+ addChecker(register##CLASS, FULLNAME, HELPTEXT, DOC_URI);
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+
+ for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
+ i != e; ++i) {
+ // Get access to the plugin.
+ std::string err;
+ DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str(), &err);
+ if (!lib.isValid()) {
+ diags.Report(diag::err_fe_unable_to_load_plugin) << *i << err;
+ continue;
+ }
+
+ // See if it's compatible with this build of clang.
+ const char *pluginAPIVersion =
+ (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
+ if (!isCompatibleAPIVersion(pluginAPIVersion)) {
+ Diags.Report(diag::warn_incompatible_analyzer_plugin_api)
+ << llvm::sys::path::filename(*i);
+ Diags.Report(diag::note_incompatible_analyzer_plugin_api)
+ << CLANG_ANALYZER_API_VERSION_STRING
+ << pluginAPIVersion;
+ continue;
+ }
+
+ // Register its checkers.
+ RegisterCheckersFn registerPluginCheckers =
+ (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
+ "clang_registerCheckers");
+ if (registerPluginCheckers)
+ registerPluginCheckers(*this);
+ }
+}
+
+static constexpr char PackageSeparator = '.';
+
+static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
+ const CheckerRegistry::CheckerInfo &b) {
+ return a.FullName < b.FullName;
+}
+
+static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
+ StringRef packageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!checker.FullName.startswith(packageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (checker.FullName.size() == packageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (checker.FullName[packageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
+}
+
+CheckerRegistry::CheckerInfoSet CheckerRegistry::getEnabledCheckers(
+ const AnalyzerOptions &Opts) const {
+
+ assert(std::is_sorted(Checkers.begin(), Checkers.end(), checkerNameLT) &&
+ "In order to efficiently gather checkers, this function expects them "
+ "to be already sorted!");
+
+ CheckerInfoSet enabledCheckers;
+ const auto end = Checkers.cend();
+
+ for (const std::pair<std::string, bool> &opt : Opts.CheckersControlList) {
+ // Use a binary search to find the possible start of the package.
+ CheckerRegistry::CheckerInfo packageInfo(nullptr, opt.first, "", "");
+ auto firstRelatedChecker =
+ std::lower_bound(Checkers.cbegin(), end, packageInfo, checkerNameLT);
+
+ if (firstRelatedChecker == end ||
+ !isInPackage(*firstRelatedChecker, opt.first)) {
+ Diags.Report(diag::err_unknown_analyzer_checker) << opt.first;
+ Diags.Report(diag::note_suggest_disabling_all_checkers);
+ return {};
+ }
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single
+ // checker.
+ size_t size = 1;
+ llvm::StringMap<size_t>::const_iterator packageSize =
+ Packages.find(opt.first);
+ if (packageSize != Packages.end())
+ size = packageSize->getValue();
+
+ // Step through all the checkers in the package.
+ for (auto lastRelatedChecker = firstRelatedChecker+size;
+ firstRelatedChecker != lastRelatedChecker; ++firstRelatedChecker)
+ if (opt.second)
+ enabledCheckers.insert(&*firstRelatedChecker);
+ else
+ enabledCheckers.remove(&*firstRelatedChecker);
+ }
+
+ return enabledCheckers;
+}
+
+void CheckerRegistry::addChecker(InitializationFunction Fn, StringRef Name,
+ StringRef Desc, StringRef DocsUri) {
+ Checkers.emplace_back(Fn, Name, Desc, DocsUri);
+
+ // Record the presence of the checker in its packages.
+ StringRef packageName, leafName;
+ std::tie(packageName, leafName) = Name.rsplit(PackageSeparator);
+ while (!leafName.empty()) {
+ Packages[packageName] += 1;
+ std::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+ }
+}
+
+void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
+ const AnalyzerOptions &Opts) const {
+ // Sort checkers for efficient collection.
+ llvm::sort(Checkers, checkerNameLT);
+
+ // Collect checkers enabled by the options.
+ CheckerInfoSet enabledCheckers = getEnabledCheckers(Opts);
+
+ // Initialize the CheckerManager with all enabled checkers.
+ for (const auto *i : enabledCheckers) {
+ checkerMgr.setCurrentCheckName(CheckName(i->FullName));
+ i->Initialize(checkerMgr);
+ }
+}
+
+void CheckerRegistry::validateCheckerOptions(
+ const AnalyzerOptions &opts) const {
+ for (const auto &config : opts.Config) {
+ size_t pos = config.getKey().find(':');
+ if (pos == StringRef::npos)
+ continue;
+
+ bool hasChecker = false;
+ StringRef checkerName = config.getKey().substr(0, pos);
+ for (const auto &checker : Checkers) {
+ if (checker.FullName.startswith(checkerName) &&
+ (checker.FullName.size() == pos || checker.FullName[pos] == '.')) {
+ hasChecker = true;
+ break;
+ }
+ }
+ if (!hasChecker)
+ Diags.Report(diag::err_unknown_analyzer_checker) << checkerName;
+ }
+}
+
+void CheckerRegistry::printHelp(raw_ostream &out,
+ size_t maxNameChars) const {
+ // FIXME: Alphabetical sort puts 'experimental' in the middle.
+ // Would it be better to name it '~experimental' or something else
+ // that's ASCIIbetically last?
+ llvm::sort(Checkers, checkerNameLT);
+
+ // FIXME: Print available packages.
+
+ out << "CHECKERS:\n";
+
+ // Find the maximum option length.
+ size_t optionFieldWidth = 0;
+ for (const auto &i : Checkers) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ // Package.Name Description [Hidden]
+ size_t nameLength = i.FullName.size();
+ if (nameLength <= maxNameChars)
+ optionFieldWidth = std::max(optionFieldWidth, nameLength);
+ }
+
+ const size_t initialPad = 2;
+ for (const auto &i : Checkers) {
+ out.indent(initialPad) << i.FullName;
+
+ int pad = optionFieldWidth - i.FullName.size();
+
+ // Break on long option names.
+ if (pad < 0) {
+ out << '\n';
+ pad = optionFieldWidth + initialPad;
+ }
+ out.indent(pad + 2) << i.Desc;
+
+ out << '\n';
+ }
+}
+
+void CheckerRegistry::printList(raw_ostream &out,
+ const AnalyzerOptions &opts) const {
+ // Sort checkers for efficient collection.
+ llvm::sort(Checkers, checkerNameLT);
+
+ // Collect checkers enabled by the options.
+ CheckerInfoSet enabledCheckers = getEnabledCheckers(opts);
+
+ for (const auto *i : enabledCheckers)
+ out << i->FullName << '\n';
+}
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index c43d30440c8f..b1927c8401d6 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -48,7 +48,7 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
FileID mainFileID = SM.getMainFileID();
AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
- llvm::StringRef modelPath = analyzerOpts->Config["model-path"];
+ llvm::StringRef modelPath = analyzerOpts->ModelPath;
llvm::SmallString<128> fileName;
diff --git a/lib/StaticAnalyzer/README.txt b/lib/StaticAnalyzer/README.txt
index d4310c57d849..79a16ec7673d 100644
--- a/lib/StaticAnalyzer/README.txt
+++ b/lib/StaticAnalyzer/README.txt
@@ -69,23 +69,23 @@ triggered the problem.
= Notes about C++ =
-Since now constructors are seen before the variable that is constructed
-in the CFG, we create a temporary object as the destination region that
+Since now constructors are seen before the variable that is constructed
+in the CFG, we create a temporary object as the destination region that
is constructed into. See ExprEngine::VisitCXXConstructExpr().
In ExprEngine::processCallExit(), we always bind the object region to the
evaluated CXXConstructExpr. Then in VisitDeclStmt(), we compute the
corresponding lazy compound value if the variable is not a reference, and
bind the variable region to the lazy compound value. If the variable
-is a reference, just use the object region as the initilizer value.
+is a reference, just use the object region as the initializer value.
Before entering a C++ method (or ctor/dtor), the 'this' region is bound
-to the object region. In ctors, we synthesize 'this' region with
+to the object region. In ctors, we synthesize 'this' region with
CXXRecordDecl*, which means we do not use type qualifiers. In methods, we
-synthesize 'this' region with CXXMethodDecl*, which has getThisType()
+synthesize 'this' region with CXXMethodDecl*, which has getThisType()
taking type qualifiers into account. It does not matter we use qualified
'this' region in one method and unqualified 'this' region in another
-method, because we only need to ensure the 'this' region is consistent
+method, because we only need to ensure the 'this' region is consistent
when we synthesize it and create it directly from CXXThisExpr in a single
method call.
diff --git a/lib/Tooling/ASTDiff/ASTDiff.cpp b/lib/Tooling/ASTDiff/ASTDiff.cpp
index a5d2d1d24729..592e8572c770 100644
--- a/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -741,7 +741,7 @@ public:
List.pop();
}
// TODO this is here to get a stable output, not a good heuristic
- llvm::sort(Result.begin(), Result.end());
+ llvm::sort(Result);
return Result;
}
int peekMax() const {
@@ -845,9 +845,8 @@ void ASTDiff::Impl::matchBottomUp(Mapping &M) const {
}
bool Matched = M.hasSrc(Id1);
const Node &N1 = T1.getNode(Id1);
- bool MatchedChildren =
- std::any_of(N1.Children.begin(), N1.Children.end(),
- [&](NodeId Child) { return M.hasSrc(Child); });
+ bool MatchedChildren = llvm::any_of(
+ N1.Children, [&](NodeId Child) { return M.hasSrc(Child); });
if (Matched || !MatchedChildren)
continue;
NodeId Id2 = findCandidate(M, Id1);
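
The llvm::sort and llvm::any_of range helpers used here live in llvm/ADT/STLExtras.h; a standalone sketch of the equivalence (not part of this patch):

#include "llvm/ADT/STLExtras.h"
#include <vector>

void rangeHelpers(std::vector<int> &Result) {
  // Same as llvm::sort(Result.begin(), Result.end()).
  llvm::sort(Result);
  // Same as std::any_of over the whole container.
  bool HasNegative = llvm::any_of(Result, [](int V) { return V < 0; });
  (void)HasNegative;
}
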
diff --git a/lib/Tooling/AllTUsExecution.cpp b/lib/Tooling/AllTUsExecution.cpp
index b761556ee76b..0f172b782963 100644
--- a/lib/Tooling/AllTUsExecution.cpp
+++ b/lib/Tooling/AllTUsExecution.cpp
@@ -53,6 +53,12 @@ private:
} // namespace
+llvm::cl::opt<std::string>
+ Filter("filter",
+ llvm::cl::desc("Only process files that match this filter. "
+ "This flag only applies to all-TUs."),
+ llvm::cl::init(".*"));
+
AllTUsToolExecutor::AllTUsToolExecutor(
const CompilationDatabase &Compilations, unsigned ThreadCount,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
@@ -90,7 +96,12 @@ llvm::Error AllTUsToolExecutor::execute(
llvm::errs() << Msg.str() << "\n";
};
- auto Files = Compilations.getAllFiles();
+ std::vector<std::string> Files;
+ llvm::Regex RegexFilter(Filter);
+ for (const auto& File : Compilations.getAllFiles()) {
+ if (RegexFilter.match(File))
+ Files.push_back(File);
+ }
// Add a counter to track the progress.
const std::string TotalNumStr = std::to_string(Files.size());
unsigned Counter = 0;
@@ -104,7 +115,12 @@ llvm::Error AllTUsToolExecutor::execute(
{
llvm::ThreadPool Pool(ThreadCount == 0 ? llvm::hardware_concurrency()
: ThreadCount);
-
+ llvm::SmallString<128> InitialWorkingDir;
+ if (auto EC = llvm::sys::fs::current_path(InitialWorkingDir)) {
+ InitialWorkingDir = "";
+ llvm::errs() << "Error while getting current working directory: "
+ << EC.message() << "\n";
+ }
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
@@ -116,12 +132,21 @@ llvm::Error AllTUsToolExecutor::execute(
for (const auto &FileAndContent : OverlayFiles)
Tool.mapVirtualFile(FileAndContent.first(),
FileAndContent.second);
+ // Do not restore working dir from multiple threads to avoid races.
+ Tool.setRestoreWorkingDir(false);
if (Tool.run(Action.first.get()))
AppendError(llvm::Twine("Failed to run action on ") + Path +
"\n");
},
File);
}
+ // Make sure all tasks have finished before resetting the working directory.
+ Pool.wait();
+ if (!InitialWorkingDir.empty()) {
+ if (auto EC = llvm::sys::fs::set_current_path(InitialWorkingDir))
+ llvm::errs() << "Error while restoring working directory: "
+ << EC.message() << "\n";
+ }
}
if (!ErrorMsg.empty())
@@ -133,7 +158,8 @@ llvm::Error AllTUsToolExecutor::execute(
static llvm::cl::opt<unsigned> ExecutorConcurrency(
"execute-concurrency",
llvm::cl::desc("The number of threads used to process all files in "
- "parallel. Set to 0 for hardware concurrency."),
+ "parallel. Set to 0 for hardware concurrency. "
+ "This flag only applies to all-TUs."),
llvm::cl::init(0));
class AllTUsToolExecutorPlugin : public ToolExecutorPlugin {
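
A hedged sketch of driving this executor directly from C++; it assumes the ToolExecutor::execute(std::unique_ptr<FrontendActionFactory>) convenience overload and a defaulted PCHContainerOperations constructor argument, as in the existing Tooling headers, and most tools would instead go through createExecutorFromCommandLineArgs:

#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/AllTUsExecution.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/Error.h"

// Runs a syntax-only action over every file in the database. ThreadCount == 0
// selects llvm::hardware_concurrency(), matching the thread pool setup above.
llvm::Error checkAllTUs(const clang::tooling::CompilationDatabase &DB) {
  clang::tooling::AllTUsToolExecutor Executor(DB, /*ThreadCount=*/0);
  return Executor.execute(
      clang::tooling::newFrontendActionFactory<clang::SyntaxOnlyAction>());
}
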
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index 031d8b51dec4..4b671e299ab7 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -35,5 +35,6 @@ add_clang_library(clangTooling
clangFrontend
clangLex
clangRewrite
+ clangSerialization
clangToolingCore
)
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index 31a769fa21e5..cce8e1f1df24 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -218,6 +218,25 @@ private:
ArrayRef<std::string> Arr;
};
+// Filters out flags that tools do not use, such as -no-integrated-as and
+// -Wa,*. They are not needed for syntax checking and can confuse targets
+// that don't support these options.
+struct FilterUnusedFlags {
+ bool operator() (StringRef S) {
+ return (S == "-no-integrated-as") || S.startswith("-Wa,");
+ }
+};
+
+std::string GetClangToolCommand() {
+ static int Dummy;
+ std::string ClangExecutable =
+ llvm::sys::fs::getMainExecutable("clang", (void *)&Dummy);
+ SmallString<128> ClangToolPath;
+ ClangToolPath = llvm::sys::path::parent_path(ClangExecutable);
+ llvm::sys::path::append(ClangToolPath, "clang-tool");
+ return ClangToolPath.str();
+}
+
} // namespace
/// Strips any positional args and possible argv[0] from a command-line
@@ -257,9 +276,10 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
Diagnostics));
NewDriver->setCheckInputsExist(false);
- // This becomes the new argv[0]. The value is actually not important as it
- // isn't used for invoking Tools.
- Args.insert(Args.begin(), "clang-tool");
+ // This becomes the new argv[0]. The value is used to detect libc++ include
+ // dirs on Mac; it isn't used on other platforms.
+ std::string Argv0 = GetClangToolCommand();
+ Args.insert(Args.begin(), Argv0.c_str());
// By adding -c, we force the driver to treat compilation as the last phase.
// It will then issue warnings via Diagnostics about un-used options that
@@ -275,10 +295,7 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
// up with no jobs but then this is the user's fault.
Args.push_back("placeholder.cpp");
- // Remove -no-integrated-as; it's not used for syntax checking,
- // and it confuses targets which don't support this option.
- Args.erase(std::remove_if(Args.begin(), Args.end(),
- MatchesAny(std::string("-no-integrated-as"))),
+ Args.erase(std::remove_if(Args.begin(), Args.end(), FilterUnusedFlags()),
Args.end());
const std::unique_ptr<driver::Compilation> Compilation(
@@ -291,9 +308,11 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
CompileJobAnalyzer CompileAnalyzer;
for (const auto &Cmd : Jobs) {
- // Collect only for Assemble and Compile jobs. If we do all jobs we get
- // duplicates since Link jobs point to Assemble jobs as inputs.
+ // Collect only for Assemble, Backend, and Compile jobs. If we do all jobs
+ // we get duplicates since Link jobs point to Assemble jobs as inputs.
+ // -flto* flags produce BackendJobClass jobs, which still need to be analyzed.
if (Cmd.getSource().getKind() == driver::Action::AssembleJobClass ||
+ Cmd.getSource().getKind() == driver::Action::BackendJobClass ||
Cmd.getSource().getKind() == driver::Action::CompileJobClass) {
CompileAnalyzer.run(&Cmd.getSource());
}
@@ -358,7 +377,7 @@ FixedCompilationDatabase::loadFromFile(StringRef Path, std::string &ErrorMsg) {
FixedCompilationDatabase::
FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
- std::vector<std::string> ToolCommandLine(1, "clang-tool");
+ std::vector<std::string> ToolCommandLine(1, GetClangToolCommand());
ToolCommandLine.insert(ToolCommandLine.end(),
CommandLine.begin(), CommandLine.end());
CompileCommands.emplace_back(Directory, StringRef(),
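
A standalone sketch of the stripping done by FilterUnusedFlags above (the sample arguments are illustrative):

#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <vector>

// Same predicate as FilterUnusedFlags: drop -no-integrated-as and -Wa,* flags.
static bool isUnusedFlag(llvm::StringRef S) {
  return S == "-no-integrated-as" || S.startswith("-Wa,");
}

void stripUnusedFlags(std::vector<const char *> &Args) {
  // {"clang", "-no-integrated-as", "-Wa,--noexecstack", "-c", "foo.c"}
  // becomes {"clang", "-c", "foo.c"}.
  Args.erase(std::remove_if(Args.begin(), Args.end(), isUnusedFlag),
             Args.end());
}
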
diff --git a/lib/Tooling/Core/Diagnostic.cpp b/lib/Tooling/Core/Diagnostic.cpp
index 9e4833f2eff4..e3a33d9a3755 100644
--- a/lib/Tooling/Core/Diagnostic.cpp
+++ b/lib/Tooling/Core/Diagnostic.cpp
@@ -23,10 +23,15 @@ DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message)
DiagnosticMessage::DiagnosticMessage(llvm::StringRef Message,
const SourceManager &Sources,
SourceLocation Loc)
- : Message(Message) {
+ : Message(Message), FileOffset(0) {
assert(Loc.isValid() && Loc.isFileID());
FilePath = Sources.getFilename(Loc);
- FileOffset = Sources.getFileOffset(Loc);
+
+ // Don't store an offset for locations in the scratch space: it tells the
+ // user nothing, and it depends on the history of macro expansions, which
+ // prevents deduplication of warnings in headers.
+ if (!FilePath.empty())
+ FileOffset = Sources.getFileOffset(Loc);
}
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
diff --git a/lib/Tooling/Core/Lookup.cpp b/lib/Tooling/Core/Lookup.cpp
index 6edf61b8050d..cc448d144e2c 100644
--- a/lib/Tooling/Core/Lookup.cpp
+++ b/lib/Tooling/Core/Lookup.cpp
@@ -14,6 +14,7 @@
#include "clang/Tooling/Core/Lookup.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclarationName.h"
using namespace clang;
using namespace clang::tooling;
@@ -114,6 +115,37 @@ static bool isFullyQualified(const NestedNameSpecifier *NNS) {
return false;
}
+// Returns true if spelling symbol \p QName as \p Spelling in \p UseContext is
+// ambiguous. For example, if QName is "::y::bar" and the spelling is "y::bar"
+// in `UseContext` "a" that contains a nested namespace "a::y", then "y::bar"
+// can be resolved to ::a::y::bar, which can cause a compile error.
+// FIXME: consider using namespaces.
+static bool isAmbiguousNameInScope(StringRef Spelling, StringRef QName,
+ const DeclContext &UseContext) {
+ assert(QName.startswith("::"));
+ if (Spelling.startswith("::"))
+ return false;
+
+ // Look up the first component of Spelling in all enclosing namespaces and
+ // check whether there are any existing symbols with the same name but in a
+ // different scope.
+ StringRef Head = Spelling.split("::").first;
+
+ llvm::SmallVector<const NamespaceDecl *, 4> UseNamespaces =
+ getAllNamedNamespaces(&UseContext);
+ auto &AST = UseContext.getParentASTContext();
+ StringRef TrimmedQName = QName.substr(2);
+ for (const auto *NS : UseNamespaces) {
+ auto LookupRes = NS->lookup(DeclarationName(&AST.Idents.get(Head)));
+ if (!LookupRes.empty()) {
+ for (const NamedDecl *Res : LookupRes)
+ if (!TrimmedQName.startswith(Res->getQualifiedNameAsString()))
+ return true;
+ }
+ }
+ return false;
+}
+
std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
@@ -146,6 +178,14 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
// figure out how good a namespace match we have with our destination type.
// We work backwards (from most specific possible namespace to least
// specific).
- return getBestNamespaceSubstr(UseContext, ReplacementString,
- isFullyQualified(Use));
+ StringRef Suggested = getBestNamespaceSubstr(UseContext, ReplacementString,
+ isFullyQualified(Use));
+ // Use the fully qualified name if the suggested name is ambiguous.
+ // FIXME: consider re-shortening the name until the name is not ambiguous. We
+ // are not doing this because ambiguity is pretty bad and we should not try to
+ // be clever in handling such cases. Making this noticeable to users seems to
+ // be a better option.
+ return isAmbiguousNameInScope(Suggested, ReplacementString, *UseContext)
+ ? ReplacementString
+ : Suggested;
}
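
The situation isAmbiguousNameInScope guards against, written out as a self-contained example matching the comment above (all names are illustrative):

// QName is "::y::bar"; the shortened spelling "y::bar" is ambiguous inside
// namespace a because a nested namespace a::y also exists.
namespace y { void bar(); }
namespace a {
namespace y { void bar(); }   // unrelated a::y::bar captures unqualified lookup
void use() {
  y::bar();  // resolves to ::a::y::bar here, not ::y::bar, so the replacement
             // has to keep the fully qualified "::y::bar"
}
} // namespace a
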
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index 67e2dcfd73c1..3b7e39814afa 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -19,7 +19,6 @@
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "clang/Rewrite/Core/Rewriter.h"
@@ -29,6 +28,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -483,12 +483,11 @@ Replacements Replacements::merge(const Replacements &ReplacesToMerge) const {
// Returns a set of non-overlapping and sorted ranges that is equivalent to
// \p Ranges.
static std::vector<Range> combineAndSortRanges(std::vector<Range> Ranges) {
- llvm::sort(Ranges.begin(), Ranges.end(),
- [](const Range &LHS, const Range &RHS) {
- if (LHS.getOffset() != RHS.getOffset())
- return LHS.getOffset() < RHS.getOffset();
- return LHS.getLength() < RHS.getLength();
- });
+ llvm::sort(Ranges, [](const Range &LHS, const Range &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ return LHS.getLength() < RHS.getLength();
+ });
std::vector<Range> Result;
for (const auto &R : Ranges) {
if (Result.empty() ||
@@ -584,8 +583,8 @@ llvm::Expected<std::string> applyAllReplacements(StringRef Code,
if (Replaces.empty())
return Code.str();
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
diff --git a/lib/Tooling/Execution.cpp b/lib/Tooling/Execution.cpp
index 7ae67747acb2..9ddb18a57b46 100644
--- a/lib/Tooling/Execution.cpp
+++ b/lib/Tooling/Execution.cpp
@@ -16,7 +16,7 @@ LLVM_INSTANTIATE_REGISTRY(clang::tooling::ToolExecutorPluginRegistry)
namespace clang {
namespace tooling {
-static llvm::cl::opt<std::string>
+llvm::cl::opt<std::string>
ExecutorName("executor", llvm::cl::desc("The name of the executor to use."),
llvm::cl::init("standalone"));
diff --git a/lib/Tooling/Inclusions/HeaderIncludes.cpp b/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 99c0866a6855..c74ad0b9cd56 100644
--- a/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -10,6 +10,7 @@
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/Support/FormatVariadic.h"
namespace clang {
namespace tooling {
@@ -23,8 +24,7 @@ LangOptions createLangOpts() {
LangOpts.LineComment = 1;
LangOpts.CXXOperatorNames = 1;
LangOpts.Bool = 1;
- LangOpts.ObjC1 = 1;
- LangOpts.ObjC2 = 1;
+ LangOpts.ObjC = 1;
LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
LangOpts.DeclSpecKeyword = 1; // To get __declspec.
LangOpts.WChar = 1; // To get wchar_t
@@ -181,7 +181,7 @@ bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
llvm::sys::path::stem(IncludeName.drop_front(1).drop_back(1));
if (FileStem.startswith(HeaderStem) ||
FileStem.startswith_lower(HeaderStem)) {
- llvm::Regex MainIncludeRegex((HeaderStem + Style.IncludeIsMainRegex).str(),
+ llvm::Regex MainIncludeRegex(HeaderStem.str() + Style.IncludeIsMainRegex,
llvm::Regex::IgnoreCase);
if (MainIncludeRegex.match(FileStem))
return true;
@@ -275,8 +275,8 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
if ((IsAngled && StringRef(Inc.Name).startswith("<")) ||
(!IsAngled && StringRef(Inc.Name).startswith("\"")))
return llvm::None;
- std::string Quoted = IsAngled ? ("<" + IncludeName + ">").str()
- : ("\"" + IncludeName + "\"").str();
+ std::string Quoted =
+ llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName);
StringRef QuotedName = Quoted;
int Priority = Categories.getIncludePriority(
QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
@@ -293,7 +293,7 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
}
}
assert(InsertOffset <= Code.size());
- std::string NewInclude = ("#include " + QuotedName + "\n").str();
+ std::string NewInclude = llvm::formatv("#include {0}\n", QuotedName);
// When inserting headers at end of the code, also append '\n' to the code
// if it does not end with '\n'.
// FIXME: when inserting multiple #includes at the end of code, only one
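
A standalone sketch of the llvm::formatv calls introduced above (llvm/Support/FormatVariadic.h); the helper name is made up:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include <string>

// "{0}" is substituted with Name; the result converts to std::string, which is
// how both call sites above use it.
std::string quoteInclude(llvm::StringRef Name, bool IsAngled) {
  return llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", Name);
}
// quoteInclude("vector", /*IsAngled=*/true)   -> "<vector>"
// quoteInclude("foo.h", /*IsAngled=*/false)   -> "\"foo.h\""
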
diff --git a/lib/Tooling/InterpolatingCompilationDatabase.cpp b/lib/Tooling/InterpolatingCompilationDatabase.cpp
index bc564584bd01..4d0d84f660a2 100644
--- a/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -48,6 +48,7 @@
#include "clang/Frontend/LangStandard.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
@@ -123,55 +124,79 @@ static types::ID foldType(types::ID Lang) {
struct TransferableCommand {
// Flags that should not apply to all files are stripped from CommandLine.
CompileCommand Cmd;
- // Language detected from -x or the filename.
- types::ID Type = types::TY_INVALID;
+ // Language detected from -x or the filename. Never TY_INVALID.
+ Optional<types::ID> Type;
// Standard specified by -std.
LangStandard::Kind Std = LangStandard::lang_unspecified;
+ // Whether the command line is for the cl-compatible driver.
+ bool ClangCLMode;
TransferableCommand(CompileCommand C)
- : Cmd(std::move(C)), Type(guessType(Cmd.Filename)) {
- std::vector<std::string> NewArgs = {Cmd.CommandLine.front()};
+ : Cmd(std::move(C)), Type(guessType(Cmd.Filename)),
+ ClangCLMode(checkIsCLMode(Cmd.CommandLine)) {
+ std::vector<std::string> OldArgs = std::move(Cmd.CommandLine);
+ Cmd.CommandLine.clear();
+
+ // Wrap the old arguments in an InputArgList.
+ llvm::opt::InputArgList ArgList;
+ {
+ SmallVector<const char *, 16> TmpArgv;
+ for (const std::string &S : OldArgs)
+ TmpArgv.push_back(S.c_str());
+ ArgList = {TmpArgv.begin(), TmpArgv.end()};
+ }
+
// Parse the old args in order to strip out and record unwanted flags.
+ // We parse each argument individually so that we can retain the exact
+ // spelling of each argument; re-rendering is lossy for aliased flags.
+ // E.g. in CL mode, /W4 maps to -Wall.
auto OptTable = clang::driver::createDriverOptTable();
- std::vector<const char *> Argv;
- for (unsigned I = 1; I < Cmd.CommandLine.size(); ++I)
- Argv.push_back(Cmd.CommandLine[I].c_str());
- unsigned MissingI, MissingC;
- auto ArgList = OptTable->ParseArgs(Argv, MissingI, MissingC);
- for (const auto *Arg : ArgList) {
- const auto &option = Arg->getOption();
+ Cmd.CommandLine.emplace_back(OldArgs.front());
+ for (unsigned Pos = 1; Pos < OldArgs.size();) {
+ using namespace driver::options;
+
+ const unsigned OldPos = Pos;
+ std::unique_ptr<llvm::opt::Arg> Arg(OptTable->ParseOneArg(
+ ArgList, Pos,
+ /* Include */ClangCLMode ? CoreOption | CLOption : 0,
+ /* Exclude */ClangCLMode ? 0 : CLOption));
+
+ if (!Arg)
+ continue;
+
+ const llvm::opt::Option &Opt = Arg->getOption();
+
// Strip input and output files.
- if (option.matches(clang::driver::options::OPT_INPUT) ||
- option.matches(clang::driver::options::OPT_o)) {
+ if (Opt.matches(OPT_INPUT) || Opt.matches(OPT_o) ||
+ (ClangCLMode && (Opt.matches(OPT__SLASH_Fa) ||
+ Opt.matches(OPT__SLASH_Fe) ||
+ Opt.matches(OPT__SLASH_Fi) ||
+ Opt.matches(OPT__SLASH_Fo))))
continue;
- }
+
// Strip -x, but record the overridden language.
- if (option.matches(clang::driver::options::OPT_x)) {
- for (const char *Value : Arg->getValues())
- Type = types::lookupTypeForTypeSpecifier(Value);
+ if (const auto GivenType = tryParseTypeArg(*Arg)) {
+ Type = *GivenType;
continue;
}
- // Strip --std, but record the value.
- if (option.matches(clang::driver::options::OPT_std_EQ)) {
- for (const char *Value : Arg->getValues()) {
- Std = llvm::StringSwitch<LangStandard::Kind>(Value)
-#define LANGSTANDARD(id, name, lang, desc, features) \
- .Case(name, LangStandard::lang_##id)
-#define LANGSTANDARD_ALIAS(id, alias) .Case(alias, LangStandard::lang_##id)
-#include "clang/Frontend/LangStandards.def"
- .Default(Std);
- }
+
+ // Strip -std, but record the value.
+ if (const auto GivenStd = tryParseStdArg(*Arg)) {
+ if (*GivenStd != LangStandard::lang_unspecified)
+ Std = *GivenStd;
continue;
}
- llvm::opt::ArgStringList ArgStrs;
- Arg->render(ArgList, ArgStrs);
- NewArgs.insert(NewArgs.end(), ArgStrs.begin(), ArgStrs.end());
+
+ Cmd.CommandLine.insert(Cmd.CommandLine.end(),
+ OldArgs.data() + OldPos, OldArgs.data() + Pos);
}
- Cmd.CommandLine = std::move(NewArgs);
if (Std != LangStandard::lang_unspecified) // -std takes precedence over -x
Type = toType(LangStandard::getLangStandardForKind(Std).getLanguage());
- Type = foldType(Type);
+ Type = foldType(*Type);
+ // The contract is to store None instead of TY_INVALID.
+ if (Type == types::TY_INVALID)
+ Type = llvm::None;
}
// Produce a CompileCommand for \p filename, based on this one.
@@ -181,25 +206,43 @@ struct TransferableCommand {
bool TypeCertain;
auto TargetType = guessType(Filename, &TypeCertain);
// If the filename doesn't determine the language (.h), transfer with -x.
- if (!TypeCertain) {
+ if (TargetType != types::TY_INVALID && !TypeCertain && Type) {
TargetType = types::onlyPrecompileType(TargetType) // header?
- ? types::lookupHeaderTypeForSourceType(Type)
- : Type;
- Result.CommandLine.push_back("-x");
- Result.CommandLine.push_back(types::getTypeName(TargetType));
+ ? types::lookupHeaderTypeForSourceType(*Type)
+ : *Type;
+ if (ClangCLMode) {
+ const StringRef Flag = toCLFlag(TargetType);
+ if (!Flag.empty())
+ Result.CommandLine.push_back(Flag);
+ } else {
+ Result.CommandLine.push_back("-x");
+ Result.CommandLine.push_back(types::getTypeName(TargetType));
+ }
}
// --std flag may only be transferred if the language is the same.
// We may consider "translating" these, e.g. c++11 -> c11.
if (Std != LangStandard::lang_unspecified && foldType(TargetType) == Type) {
- Result.CommandLine.push_back(
- "-std=" +
- std::string(LangStandard::getLangStandardForKind(Std).getName()));
+ Result.CommandLine.emplace_back((
+ llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
+ LangStandard::getLangStandardForKind(Std).getName()).str());
}
Result.CommandLine.push_back(Filename);
return Result;
}
private:
+ // Determine whether the given command line is intended for the CL driver.
+ static bool checkIsCLMode(ArrayRef<std::string> CmdLine) {
+ // First look for --driver-mode.
+ for (StringRef S : llvm::reverse(CmdLine)) {
+ if (S.consume_front("--driver-mode="))
+ return S == "cl";
+ }
+
+ // Otherwise just check the clang executable file name.
+ return llvm::sys::path::stem(CmdLine.front()).endswith_lower("cl");
+ }
+
// Map the language from the --std flag to that of the -x flag.
static types::ID toType(InputKind::Language Lang) {
switch (Lang) {
@@ -215,64 +258,111 @@ private:
return types::TY_INVALID;
}
}
+
+ // Convert a file type to the matching CL-style type flag.
+ static StringRef toCLFlag(types::ID Type) {
+ switch (Type) {
+ case types::TY_C:
+ case types::TY_CHeader:
+ return "/TC";
+ case types::TY_CXX:
+ case types::TY_CXXHeader:
+ return "/TP";
+ default:
+ return StringRef();
+ }
+ }
+
+ // Try to interpret the argument as a type specifier, e.g. '-x'.
+ Optional<types::ID> tryParseTypeArg(const llvm::opt::Arg &Arg) {
+ const llvm::opt::Option &Opt = Arg.getOption();
+ using namespace driver::options;
+ if (ClangCLMode) {
+ if (Opt.matches(OPT__SLASH_TC) || Opt.matches(OPT__SLASH_Tc))
+ return types::TY_C;
+ if (Opt.matches(OPT__SLASH_TP) || Opt.matches(OPT__SLASH_Tp))
+ return types::TY_CXX;
+ } else {
+ if (Opt.matches(driver::options::OPT_x))
+ return types::lookupTypeForTypeSpecifier(Arg.getValue());
+ }
+ return None;
+ }
+
+ // Try to interpret the argument as '-std='.
+ Optional<LangStandard::Kind> tryParseStdArg(const llvm::opt::Arg &Arg) {
+ using namespace driver::options;
+ if (Arg.getOption().matches(ClangCLMode ? OPT__SLASH_std : OPT_std_EQ)) {
+ return llvm::StringSwitch<LangStandard::Kind>(Arg.getValue())
+#define LANGSTANDARD(id, name, lang, ...) .Case(name, LangStandard::lang_##id)
+#define LANGSTANDARD_ALIAS(id, alias) .Case(alias, LangStandard::lang_##id)
+#include "clang/Frontend/LangStandards.def"
+#undef LANGSTANDARD_ALIAS
+#undef LANGSTANDARD
+ .Default(LangStandard::lang_unspecified);
+ }
+ return None;
+ }
};
-// CommandIndex does the real work: given a filename, it produces the best
-// matching TransferableCommand by matching filenames. Basic strategy:
+// Given a filename, FileIndex picks the best matching file from the underlying
+// DB. This is the proxy file whose CompileCommand will be reused. The
+// heuristics incorporate file name, extension, and directory structure.
+// Strategy:
// - Build indexes of each of the substrings we want to look up by.
// These indexes are just sorted lists of the substrings.
-// - Forward requests to the inner CDB. If it fails, we must pick a proxy.
// - Each criterion corresponds to a range lookup into the index, so we only
// need O(log N) string comparisons to determine scores.
-// - We then break ties among the candidates with the highest score.
-class CommandIndex {
+//
+// Apart from path proximity signals, also takes file extensions into account
+// when scoring the candidates.
+class FileIndex {
public:
- CommandIndex(std::vector<TransferableCommand> AllCommands)
- : Commands(std::move(AllCommands)), Strings(Arena) {
+ FileIndex(std::vector<std::string> Files)
+ : OriginalPaths(std::move(Files)), Strings(Arena) {
// Sort commands by filename for determinism (index is a tiebreaker later).
- llvm::sort(
- Commands.begin(), Commands.end(),
- [](const TransferableCommand &Left, const TransferableCommand &Right) {
- return Left.Cmd.Filename < Right.Cmd.Filename;
- });
- for (size_t I = 0; I < Commands.size(); ++I) {
- StringRef Path =
- Strings.save(StringRef(Commands[I].Cmd.Filename).lower());
- Paths.push_back({Path, I});
+ llvm::sort(OriginalPaths);
+ Paths.reserve(OriginalPaths.size());
+ Types.reserve(OriginalPaths.size());
+ Stems.reserve(OriginalPaths.size());
+ for (size_t I = 0; I < OriginalPaths.size(); ++I) {
+ StringRef Path = Strings.save(StringRef(OriginalPaths[I]).lower());
+
+ Paths.emplace_back(Path, I);
+ Types.push_back(foldType(guessType(Path)));
Stems.emplace_back(sys::path::stem(Path), I);
auto Dir = ++sys::path::rbegin(Path), DirEnd = sys::path::rend(Path);
for (int J = 0; J < DirectorySegmentsIndexed && Dir != DirEnd; ++J, ++Dir)
if (Dir->size() > ShortDirectorySegment) // not trivial ones
Components.emplace_back(*Dir, I);
}
- llvm::sort(Paths.begin(), Paths.end());
- llvm::sort(Stems.begin(), Stems.end());
- llvm::sort(Components.begin(), Components.end());
+ llvm::sort(Paths);
+ llvm::sort(Stems);
+ llvm::sort(Components);
}
- bool empty() const { return Commands.empty(); }
+ bool empty() const { return Paths.empty(); }
- // Returns the command that best fits OriginalFilename.
- // Candidates with PreferLanguage will be chosen over others (unless it's
- // TY_INVALID, or all candidates are bad).
- const TransferableCommand &chooseProxy(StringRef OriginalFilename,
- types::ID PreferLanguage) const {
+ // Returns the path for the file that best fits OriginalFilename.
+ // Candidates with extensions matching PreferLanguage will be chosen over
+ // others (unless it's TY_INVALID, or all candidates are bad).
+ StringRef chooseProxy(StringRef OriginalFilename,
+ types::ID PreferLanguage) const {
assert(!empty() && "need at least one candidate!");
std::string Filename = OriginalFilename.lower();
auto Candidates = scoreCandidates(Filename);
std::pair<size_t, int> Best =
pickWinner(Candidates, Filename, PreferLanguage);
- DEBUG_WITH_TYPE("interpolate",
- llvm::dbgs()
- << "interpolate: chose "
- << Commands[Best.first].Cmd.Filename << " as proxy for "
- << OriginalFilename << " preferring "
- << (PreferLanguage == types::TY_INVALID
- ? "none"
- : types::getTypeName(PreferLanguage))
- << " score=" << Best.second << "\n");
- return Commands[Best.first];
+ DEBUG_WITH_TYPE(
+ "interpolate",
+ llvm::dbgs() << "interpolate: chose " << OriginalPaths[Best.first]
+ << " as proxy for " << OriginalFilename << " preferring "
+ << (PreferLanguage == types::TY_INVALID
+ ? "none"
+ : types::getTypeName(PreferLanguage))
+ << " score=" << Best.second << "\n");
+ return OriginalPaths[Best.first];
}
private:
@@ -338,7 +428,7 @@ private:
ScoredCandidate S;
S.Index = Candidate.first;
S.Preferred = PreferredLanguage == types::TY_INVALID ||
- PreferredLanguage == Commands[S.Index].Type;
+ PreferredLanguage == Types[S.Index];
S.Points = Candidate.second;
if (!S.Preferred && Best.Preferred)
continue;
@@ -371,7 +461,7 @@ private:
// If Prefix is true, it's instead the range starting with Key.
template <bool Prefix>
ArrayRef<SubstringAndIndex>
- indexLookup(StringRef Key, const std::vector<SubstringAndIndex> &Idx) const {
+ indexLookup(StringRef Key, ArrayRef<SubstringAndIndex> Idx) const {
// Use pointers as iterators to ease conversion of result to ArrayRef.
auto Range = std::equal_range(Idx.data(), Idx.data() + Idx.size(), Key,
Less<Prefix>());
@@ -379,8 +469,8 @@ private:
}
// Performs a point lookup into a nonempty index, returning a longest match.
- SubstringAndIndex
- longestMatch(StringRef Key, const std::vector<SubstringAndIndex> &Idx) const {
+ SubstringAndIndex longestMatch(StringRef Key,
+ ArrayRef<SubstringAndIndex> Idx) const {
assert(!Idx.empty());
// Longest substring match will be adjacent to a direct lookup.
auto It =
@@ -395,22 +485,27 @@ private:
return Prefix > PrevPrefix ? *It : *--It;
}
- std::vector<TransferableCommand> Commands; // Indexes point into this.
+ // Original-case paths; everything else below is lowercased.
+ std::vector<std::string> OriginalPaths;
BumpPtrAllocator Arena;
StringSaver Strings;
// Indexes of candidates by certain substrings.
// The string is lowercase and the vector sorted; the index points into OriginalPaths.
std::vector<SubstringAndIndex> Paths; // Full path.
+ // Lang types obtained by guessing on the corresponding path. I-th element is
+ // a type for the I-th path.
+ std::vector<types::ID> Types;
std::vector<SubstringAndIndex> Stems; // Basename, without extension.
std::vector<SubstringAndIndex> Components; // Last path components.
};
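
For reference, the longestMatch() comment above relies on a property of sorted string indexes: the entry sharing the longest prefix with a key is adjacent to the key's insertion point. A standalone sketch of that idea (hypothetical names, plain std::string instead of the SubstringAndIndex entries used here), not part of this patch:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Length of the common prefix of A and B.
static size_t commonPrefix(const std::string &A, const std::string &B) {
  size_t N = std::min(A.size(), B.size()), I = 0;
  while (I < N && A[I] == B[I])
    ++I;
  return I;
}

// In a sorted, nonempty index, the longest-prefix match for Key is either the
// entry at lower_bound(Key) or the one immediately before it.
std::string longestPrefixMatch(const std::vector<std::string> &SortedIdx,
                               const std::string &Key) {
  assert(!SortedIdx.empty() && "need a nonempty index");
  auto It = std::lower_bound(SortedIdx.begin(), SortedIdx.end(), Key);
  if (It == SortedIdx.end())
    return *(It - 1); // Everything sorts before Key.
  if (It == SortedIdx.begin())
    return *It; // Everything sorts at or after Key.
  // Compare the two neighbours of the insertion point.
  return commonPrefix(*It, Key) > commonPrefix(*(It - 1), Key) ? *It
                                                               : *(It - 1);
}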
// The actual CompilationDatabase wrapper delegates to its inner database.
-// If no match, looks up a command in CommandIndex and transfers it to the file.
+// If no match, looks up a proxy file in FileIndex and transfers its
+// command to the requested file.
class InterpolatingCompilationDatabase : public CompilationDatabase {
public:
InterpolatingCompilationDatabase(std::unique_ptr<CompilationDatabase> Inner)
- : Inner(std::move(Inner)), Index(allCommands()) {}
+ : Inner(std::move(Inner)), Index(this->Inner->getAllFiles()) {}
std::vector<CompileCommand>
getCompileCommands(StringRef Filename) const override {
@@ -421,7 +516,11 @@ public:
auto Lang = guessType(Filename, &TypeCertain);
if (!TypeCertain)
Lang = types::TY_INVALID;
- return {Index.chooseProxy(Filename, foldType(Lang)).transferTo(Filename)};
+ auto ProxyCommands =
+ Inner->getCompileCommands(Index.chooseProxy(Filename, foldType(Lang)));
+ if (ProxyCommands.empty())
+ return {};
+ return {TransferableCommand(ProxyCommands[0]).transferTo(Filename)};
}
std::vector<std::string> getAllFiles() const override {
@@ -433,18 +532,8 @@ public:
}
private:
- std::vector<TransferableCommand> allCommands() {
- std::vector<TransferableCommand> Result;
- for (auto Command : Inner->getAllCompileCommands()) {
- Result.emplace_back(std::move(Command));
- if (Result.back().Type == types::TY_INVALID)
- Result.pop_back();
- }
- return Result;
- }
-
std::unique_ptr<CompilationDatabase> Inner;
- CommandIndex Index;
+ FileIndex Index;
};
} // namespace
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index 2fa5fce279d6..b0feaa229c11 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -157,13 +157,16 @@ std::vector<std::string> unescapeCommandLine(JSONCommandLineSyntax Syntax,
return parser.parse();
}
+// This plugin locates a nearby compile_commands.json file, and also infers
+// compile commands for files not present in the database.
class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
std::unique_ptr<CompilationDatabase>
loadFromDirectory(StringRef Directory, std::string &ErrorMessage) override {
SmallString<1024> JSONDatabasePath(Directory);
llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
- return JSONCompilationDatabase::loadFromFile(
+ auto Base = JSONCompilationDatabase::loadFromFile(
JSONDatabasePath, ErrorMessage, JSONCommandLineSyntax::AutoDetect);
+ return Base ? inferMissingCompileCommands(std::move(Base)) : nullptr;
}
};
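
For context, a minimal sketch (not part of this patch) of driving the inference layer from client code: load the JSON database, wrap it so files missing from compile_commands.json get inferred commands, and query a header. It assumes inferMissingCompileCommands() is the public entry point declared in clang/Tooling/CompilationDatabase.h; the queried path is hypothetical.

#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/JSONCompilationDatabase.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

using namespace clang::tooling;

int main() {
  std::string Error;
  std::unique_ptr<CompilationDatabase> Base =
      JSONCompilationDatabase::loadFromFile(
          "compile_commands.json", Error, JSONCommandLineSyntax::AutoDetect);
  if (!Base) {
    llvm::errs() << "failed to load database: " << Error << "\n";
    return 1;
  }
  // Headers usually have no entry of their own; the wrapper picks the best
  // proxy source file and transfers its flags to the requested file.
  std::unique_ptr<CompilationDatabase> DB =
      inferMissingCompileCommands(std::move(Base));
  for (const CompileCommand &Cmd : DB->getCompileCommands("lib/Foo/Foo.h"))
    llvm::errs() << llvm::join(Cmd.CommandLine, " ") << "\n";
  return 0;
}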
diff --git a/lib/Tooling/Refactoring/ASTSelection.cpp b/lib/Tooling/Refactoring/ASTSelection.cpp
index 7123fc32cec9..b8f996d8218c 100644
--- a/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -250,8 +250,6 @@ static bool hasAnyDirectChildrenWithKind(const SelectedASTNode &Node,
namespace {
struct SelectedNodeWithParents {
- SelectedNodeWithParents(SelectedNodeWithParents &&) = default;
- SelectedNodeWithParents &operator=(SelectedNodeWithParents &&) = default;
SelectedASTNode::ReferenceType Node;
llvm::SmallVector<SelectedASTNode::ReferenceType, 8> Parents;
diff --git a/lib/Tooling/Refactoring/Extract/Extract.cpp b/lib/Tooling/Refactoring/Extract/Extract.cpp
index a12454cd29ef..7a741bdb2e91 100644
--- a/lib/Tooling/Refactoring/Extract/Extract.cpp
+++ b/lib/Tooling/Refactoring/Extract/Extract.cpp
@@ -52,7 +52,7 @@ SourceLocation computeFunctionExtractionLocation(const Decl *D) {
while (const auto *RD = dyn_cast<CXXRecordDecl>(D->getLexicalDeclContext()))
D = RD;
}
- return D->getLocStart();
+ return D->getBeginLoc();
}
} // end anonymous namespace
@@ -102,8 +102,8 @@ ExtractFunction::createSourceReplacements(RefactoringRuleContext &Context) {
assert(ParentDecl && "missing parent");
// Compute the source range of the code that should be extracted.
- SourceRange ExtractedRange(Code[0]->getLocStart(),
- Code[Code.size() - 1]->getLocEnd());
+ SourceRange ExtractedRange(Code[0]->getBeginLoc(),
+ Code[Code.size() - 1]->getEndLoc());
// FIXME (Alex L): Add code that accounts for macro locations.
ASTContext &AST = Context.getASTContext();
diff --git a/lib/Tooling/Refactoring/Rename/USRFinder.cpp b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
index 63f536c72a6f..4ed805fd504c 100644
--- a/lib/Tooling/Refactoring/Rename/USRFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRFinder.cpp
@@ -83,8 +83,8 @@ const NamedDecl *getNamedDeclAt(const ASTContext &Context,
// see. If both start and end are either before or after the point we're
// looking for, the point cannot be inside this decl. Don't even look at it.
for (auto *CurrDecl : Context.getTranslationUnitDecl()->decls()) {
- SourceLocation StartLoc = CurrDecl->getLocStart();
- SourceLocation EndLoc = CurrDecl->getLocEnd();
+ SourceLocation StartLoc = CurrDecl->getBeginLoc();
+ SourceLocation EndLoc = CurrDecl->getEndLoc();
if (StartLoc.isValid() && EndLoc.isValid() &&
SM.isBeforeInTranslationUnit(StartLoc, Point) !=
SM.isBeforeInTranslationUnit(EndLoc, Point))
diff --git a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index fb06b91118b0..7f60cf54c8ec 100644
--- a/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -117,7 +117,7 @@ SourceLocation StartLocationForType(TypeLoc TL) {
return NestedNameSpecifier.getBeginLoc();
TL = TL.getNextTypeLoc();
}
- return TL.getLocStart();
+ return TL.getBeginLoc();
}
SourceLocation EndLocationForType(TypeLoc TL) {
@@ -255,12 +255,12 @@ public:
Decl = UsingShadow->getTargetDecl();
}
- auto StartLoc = Expr->getLocStart();
+ auto StartLoc = Expr->getBeginLoc();
// For template function call expressions like `foo<int>()`, we want to
// restrict the end location to just before the `<` character.
SourceLocation EndLoc = Expr->hasExplicitTemplateArgs()
? Expr->getLAngleLoc().getLocWithOffset(-1)
- : Expr->getLocEnd();
+ : Expr->getEndLoc();
if (const auto *MD = llvm::dyn_cast<CXXMethodDecl>(Decl)) {
if (isInUSRSet(MD)) {
@@ -576,7 +576,7 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
// Handle using declarations explicitly, as "using a::Foo" doesn't trigger a
// typeLoc for "a::Foo".
for (const auto *Using : Finder.getUsingDecls())
- Replace(Using->getLocStart(), Using->getLocEnd(), "using " + NewName.str());
+ Replace(Using->getBeginLoc(), Using->getEndLoc(), "using " + NewName.str());
return AtomicChanges;
}
diff --git a/lib/Tooling/StandaloneExecution.cpp b/lib/Tooling/StandaloneExecution.cpp
index 7312baf9dc77..1daf792fb86f 100644
--- a/lib/Tooling/StandaloneExecution.cpp
+++ b/lib/Tooling/StandaloneExecution.cpp
@@ -30,7 +30,7 @@ static ArgumentsAdjuster getDefaultArgumentsAdjusters() {
StandaloneToolExecutor::StandaloneToolExecutor(
const CompilationDatabase &Compilations,
llvm::ArrayRef<std::string> SourcePaths,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: Tool(Compilations, SourcePaths, std::move(PCHContainerOps),
std::move(BaseFS)),
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index a106154f4b28..63aa64a5330d 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -19,7 +19,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Job.h"
@@ -51,6 +50,7 @@
#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstring>
@@ -74,9 +74,9 @@ FrontendActionFactory::~FrontendActionFactory() = default;
// it to be based on the same framework.
/// Builds a clang driver initialized for running clang tools.
-static driver::Driver *newDriver(
- DiagnosticsEngine *Diagnostics, const char *BinaryName,
- IntrusiveRefCntPtr<vfs::FileSystem> VFS) {
+static driver::Driver *
+newDriver(DiagnosticsEngine *Diagnostics, const char *BinaryName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
driver::Driver *CompilerDriver =
new driver::Driver(BinaryName, llvm::sys::getDefaultTargetTriple(),
*Diagnostics, std::move(VFS));
@@ -155,7 +155,7 @@ namespace tooling {
bool runToolOnCodeWithArgs(
FrontendAction *ToolAction, const Twine &Code,
- llvm::IntrusiveRefCntPtr<vfs::FileSystem> VFS,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
const std::vector<std::string> &Args, const Twine &FileName,
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
@@ -178,10 +178,10 @@ bool runToolOnCodeWithArgs(
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
const FileContentMappings &VirtualMappedFiles) {
- llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
- new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
+ new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
SmallString<1024> CodeStorage;
@@ -199,7 +199,8 @@ bool runToolOnCodeWithArgs(
FileName, ToolName);
}
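
As a usage note (not part of this patch): this overload is the usual test entry point, and the snippet it receives is materialized in the in-memory overlay created above. A minimal sketch, assuming the signature in this revision where the call takes ownership of the action via raw pointer:

#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/Tooling.h"

bool syntaxCheckSnippet() {
  // Parses the snippet as "input.cc" with the given flags; returns true on
  // success. SyntaxOnlyAction is a stock frontend action.
  return clang::tooling::runToolOnCodeWithArgs(
      new clang::SyntaxOnlyAction,
      "template <class T> T id(T x) { return x; }", {"-std=c++14"},
      "input.cc");
}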
-std::string getAbsolutePath(StringRef File) {
+llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
+ StringRef File) {
StringRef RelativePath(File);
// FIXME: Should '.\\' be accepted on Win32?
if (RelativePath.startswith("./")) {
@@ -207,13 +208,16 @@ std::string getAbsolutePath(StringRef File) {
}
SmallString<1024> AbsolutePath = RelativePath;
- std::error_code EC = llvm::sys::fs::make_absolute(AbsolutePath);
- assert(!EC);
- (void)EC;
+ if (auto EC = FS.makeAbsolute(AbsolutePath))
+ return llvm::errorCodeToError(EC);
llvm::sys::path::native(AbsolutePath);
return AbsolutePath.str();
}
+std::string getAbsolutePath(StringRef File) {
+ return llvm::cantFail(getAbsolutePath(*llvm::vfs::getRealFileSystem(), File));
+}
+
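A minimal sketch of the new overload, assuming only the signature added above: resolve a path against an explicit VFS (here an in-memory one with a synthetic working directory) and handle the llvm::Expected error path instead of asserting.

#include "clang/Tooling/Tooling.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"

void demoAbsolutePath() {
  llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
      new llvm::vfs::InMemoryFileSystem);
  FS->setCurrentWorkingDirectory("/src"); // Hypothetical VFS-only directory.
  if (auto Abs =
          clang::tooling::getAbsolutePath(*FS, "lib/Tooling/Tooling.cpp"))
    llvm::errs() << *Abs << "\n"; // Resolved against the VFS CWD, not the real one.
  else
    llvm::errs() << llvm::toString(Abs.takeError()) << "\n";
}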
void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
StringRef InvokedAs) {
if (!CommandLine.empty() && !InvokedAs.empty()) {
@@ -299,8 +303,12 @@ bool ToolInvocation::run() {
const std::unique_ptr<driver::Driver> Driver(
newDriver(&Diagnostics, BinaryName, Files->getVirtualFileSystem()));
- // Since the input might only be virtual, don't check whether it exists.
- Driver->setCheckInputsExist(false);
+ // The "input file not found" diagnostics from the driver are useful.
+ // The driver is only aware of the VFS working directory, but some clients
+ // change this at the FileManager level instead.
+ // In this case the checks have false positives, so skip them.
+ if (!Files->getFileSystemOpts().WorkingDir.empty())
+ Driver->setCheckInputsExist(false);
const std::unique_ptr<driver::Compilation> Compilation(
Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
if (!Compilation)
@@ -361,18 +369,18 @@ bool FrontendActionFactory::runInvocation(
const bool Success = Compiler.ExecuteAction(*ScopedToolAction);
- Files->clearStatCaches();
+ Files->clearStatCache();
return Success;
}
ClangTool::ClangTool(const CompilationDatabase &Compilations,
ArrayRef<std::string> SourcePaths,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- IntrusiveRefCntPtr<vfs::FileSystem> BaseFS)
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(std::move(PCHContainerOps)),
- OverlayFileSystem(new vfs::OverlayFileSystem(std::move(BaseFS))),
- InMemoryFileSystem(new vfs::InMemoryFileSystem),
+ OverlayFileSystem(new llvm::vfs::OverlayFileSystem(std::move(BaseFS))),
+ InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
Files(new FileManager(FileSystemOptions(), OverlayFileSystem)) {
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
appendArgumentsAdjuster(getClangStripOutputAdjuster());
@@ -411,15 +419,6 @@ int ClangTool::run(ToolAction *Action) {
// This just needs to be some symbol in the binary.
static int StaticSymbol;
- std::string InitialDirectory;
- if (llvm::ErrorOr<std::string> CWD =
- OverlayFileSystem->getCurrentWorkingDirectory()) {
- InitialDirectory = std::move(*CWD);
- } else {
- llvm::report_fatal_error("Cannot detect current path: " +
- Twine(CWD.getError().message()));
- }
-
// First insert all absolute paths into the in-memory VFS. These are global
// for all compile commands.
if (SeenWorkingDirectories.insert("/").second)
@@ -431,9 +430,33 @@ int ClangTool::run(ToolAction *Action) {
bool ProcessingFailed = false;
bool FileSkipped = false;
+ // Compute all absolute paths before we run any actions, as those will change
+ // the working directory.
+ std::vector<std::string> AbsolutePaths;
+ AbsolutePaths.reserve(SourcePaths.size());
for (const auto &SourcePath : SourcePaths) {
- std::string File(getAbsolutePath(SourcePath));
+ auto AbsPath = getAbsolutePath(*OverlayFileSystem, SourcePath);
+ if (!AbsPath) {
+ llvm::errs() << "Skipping " << SourcePath
+ << ". Error while getting an absolute path: "
+ << llvm::toString(AbsPath.takeError()) << "\n";
+ continue;
+ }
+ AbsolutePaths.push_back(std::move(*AbsPath));
+ }
+
+ // Remember the working directory in case we need to restore it.
+ std::string InitialWorkingDir;
+ if (RestoreCWD) {
+ if (auto CWD = OverlayFileSystem->getCurrentWorkingDirectory()) {
+ InitialWorkingDir = std::move(*CWD);
+ } else {
+ llvm::errs() << "Could not get working directory: "
+ << CWD.getError().message() << "\n";
+ }
+ }
+ for (llvm::StringRef File : AbsolutePaths) {
// Currently implementations of CompilationDatabase::getCompileCommands can
// change the state of the file system (e.g. prepare generated headers), so
// this method needs to run right before we invoke the tool, as the next
@@ -498,13 +521,15 @@ int ClangTool::run(ToolAction *Action) {
llvm::errs() << "Error while processing " << File << ".\n";
ProcessingFailed = true;
}
- // Return to the initial directory to correctly resolve next file by
- // relative path.
- if (OverlayFileSystem->setCurrentWorkingDirectory(InitialDirectory.c_str()))
- llvm::report_fatal_error("Cannot chdir into \"" +
- Twine(InitialDirectory) + "\n!");
}
}
+
+ if (!InitialWorkingDir.empty()) {
+ if (auto EC =
+ OverlayFileSystem->setCurrentWorkingDirectory(InitialWorkingDir))
+ llvm::errs() << "Error when trying to restore working dir: "
+ << EC.message() << "\n";
+ }
return ProcessingFailed ? 1 : (FileSkipped ? 2 : 0);
}
@@ -541,42 +566,40 @@ int ClangTool::buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs) {
return run(&Action);
}
+void ClangTool::setRestoreWorkingDir(bool RestoreCWD) {
+ this->RestoreCWD = RestoreCWD;
+}
+
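A short sketch of the new knob in context (database and source list are placeholders supplied by the caller): leave the working directory wherever the last compile command put it, for example when the embedding process manages its own CWD.

#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/ArrayRef.h"

int runSyntaxCheck(const clang::tooling::CompilationDatabase &CDB,
                   llvm::ArrayRef<std::string> Sources) {
  clang::tooling::ClangTool Tool(CDB, Sources);
  // Opt out of restoring the working directory after the run.
  Tool.setRestoreWorkingDir(false);
  // run() returns 0 on success, 1 on failure, 2 if any file was skipped.
  return Tool.run(
      clang::tooling::newFrontendActionFactory<clang::SyntaxOnlyAction>()
          .get());
}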
namespace clang {
namespace tooling {
std::unique_ptr<ASTUnit>
-buildASTFromCode(const Twine &Code, const Twine &FileName,
+buildASTFromCode(StringRef Code, StringRef FileName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps) {
return buildASTFromCodeWithArgs(Code, std::vector<std::string>(), FileName,
"clang-tool", std::move(PCHContainerOps));
}
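
As a usage note for the StringRef-based signature above, a minimal sketch that parses an arbitrary snippet into an ASTUnit backed by an in-memory file; the snippet and file name are illustrative only:

#include "clang/Frontend/ASTUnit.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

void demoBuildAST() {
  std::unique_ptr<clang::ASTUnit> AST = clang::tooling::buildASTFromCode(
      "int add(int a, int b) { return a + b; }", "demo.cc");
  if (AST)
    llvm::errs() << "parsed " << AST->getMainFileName() << "\n";
}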
std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
- const Twine &Code, const std::vector<std::string> &Args,
- const Twine &FileName, const Twine &ToolName,
- std::shared_ptr<PCHContainerOperations> PCHContainerOps,
+ StringRef Code, const std::vector<std::string> &Args, StringRef FileName,
+ StringRef ToolName, std::shared_ptr<PCHContainerOperations> PCHContainerOps,
ArgumentsAdjuster Adjuster) {
- SmallString<16> FileNameStorage;
- StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
-
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
- llvm::IntrusiveRefCntPtr<vfs::OverlayFileSystem> OverlayFileSystem(
- new vfs::OverlayFileSystem(vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
+ new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new llvm::vfs::InMemoryFileSystem);
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
ToolInvocation Invocation(
- getSyntaxOnlyToolArgs(ToolName, Adjuster(Args, FileNameRef), FileNameRef),
+ getSyntaxOnlyToolArgs(ToolName, Adjuster(Args, FileName), FileName),
&Action, Files.get(), std::move(PCHContainerOps));
- SmallString<1024> CodeStorage;
- InMemoryFileSystem->addFile(FileNameRef, 0,
- llvm::MemoryBuffer::getMemBuffer(
- Code.toNullTerminatedStringRef(CodeStorage)));
+ InMemoryFileSystem->addFile(FileName, 0,
+ llvm::MemoryBuffer::getMemBufferCopy(Code));
if (!Invocation.run())
return nullptr;